root/OpenMD/trunk/src/parallel/ForceMatrixDecomposition.cpp
Revision: 1587
Committed: Fri Jul 8 20:25:32 2011 UTC by gezelter
Original Path: branches/development/src/parallel/ForceMatrixDecomposition.cpp
File size: 38778 byte(s)
Log Message:
Fixes

File Contents

1 /*
2 * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
3 *
4 * The University of Notre Dame grants you ("Licensee") a
5 * non-exclusive, royalty free, license to use, modify and
6 * redistribute this software in source and binary code form, provided
7 * that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the
15 * distribution.
16 *
17 * This software is provided "AS IS," without a warranty of any
18 * kind. All express or implied conditions, representations and
19 * warranties, including any implied warranty of merchantability,
20 * fitness for a particular purpose or non-infringement, are hereby
21 * excluded. The University of Notre Dame and its licensors shall not
22 * be liable for any damages suffered by licensee as a result of
23 * using, modifying or distributing the software or its
24 * derivatives. In no event will the University of Notre Dame or its
25 * licensors be liable for any lost revenue, profit or data, or for
26 * direct, indirect, special, consequential, incidental or punitive
27 * damages, however caused and regardless of the theory of liability,
28 * arising out of the use of or inability to use software, even if the
29 * University of Notre Dame has been advised of the possibility of
30 * such damages.
31 *
32 * SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
33 * research, please cite the appropriate papers when you publish your
34 * work. Good starting points are:
35 *
36 * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
37 * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
38 * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
39 * [4] Vardeman & Gezelter, in progress (2009).
40 */
41 #include "parallel/ForceMatrixDecomposition.hpp"
42 #include "math/SquareMatrix3.hpp"
43 #include "nonbonded/NonBondedInteraction.hpp"
44 #include "brains/SnapshotManager.hpp"
45 #include "brains/PairList.hpp"
46
47 using namespace std;
48 namespace OpenMD {
49
50 /**
51 * distributeInitialData is essentially a copy of the older Fortran
52 * SimulationSetup routine.
53 */
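/*
 * Descriptive note on the decomposition (the row/column communicator
 * internals live in the Communicator class): each processor owns
 * nLocal_ atoms and nGroups_ cutoff groups.  In parallel runs, the
 * gathers below replicate the identities, global indices, and mass
 * factors of those atoms along the row and the column of the force
 * matrix that this processor participates in.  In both the parallel
 * and serial cases, the routine also builds the per-group atom lists
 * and the per-atom exclusion / topological-distance lists used later
 * in the pair loop.
 */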
54
55 void ForceMatrixDecomposition::distributeInitialData() {
56 snap_ = sman_->getCurrentSnapshot();
57 storageLayout_ = sman_->getStorageLayout();
58 ff_ = info_->getForceField();
59 nLocal_ = snap_->getNumberOfAtoms();
60
61 nGroups_ = info_->getNLocalCutoffGroups();
62 // gather the information for atomtype IDs (atids):
63 idents = info_->getIdentArray();
64 AtomLocalToGlobal = info_->getGlobalAtomIndices();
65 cgLocalToGlobal = info_->getGlobalGroupIndices();
66 vector<int> globalGroupMembership = info_->getGlobalGroupMembership();
67
68 massFactors = info_->getMassFactors();
69
70 PairList* excludes = info_->getExcludedInteractions();
71 PairList* oneTwo = info_->getOneTwoInteractions();
72 PairList* oneThree = info_->getOneThreeInteractions();
73 PairList* oneFour = info_->getOneFourInteractions();
74
75 #ifdef IS_MPI
76
77 AtomCommIntRow = new Communicator<Row,int>(nLocal_);
78 AtomCommRealRow = new Communicator<Row,RealType>(nLocal_);
79 AtomCommVectorRow = new Communicator<Row,Vector3d>(nLocal_);
80 AtomCommMatrixRow = new Communicator<Row,Mat3x3d>(nLocal_);
81 AtomCommPotRow = new Communicator<Row,potVec>(nLocal_);
82
83 AtomCommIntColumn = new Communicator<Column,int>(nLocal_);
84 AtomCommRealColumn = new Communicator<Column,RealType>(nLocal_);
85 AtomCommVectorColumn = new Communicator<Column,Vector3d>(nLocal_);
86 AtomCommMatrixColumn = new Communicator<Column,Mat3x3d>(nLocal_);
87 AtomCommPotColumn = new Communicator<Column,potVec>(nLocal_);
88
89 cgCommIntRow = new Communicator<Row,int>(nGroups_);
90 cgCommVectorRow = new Communicator<Row,Vector3d>(nGroups_);
91 cgCommIntColumn = new Communicator<Column,int>(nGroups_);
92 cgCommVectorColumn = new Communicator<Column,Vector3d>(nGroups_);
93
94 nAtomsInRow_ = AtomCommIntRow->getSize();
95 nAtomsInCol_ = AtomCommIntColumn->getSize();
96 nGroupsInRow_ = cgCommIntRow->getSize();
97 nGroupsInCol_ = cgCommIntColumn->getSize();
98
99 // Modify the data storage objects with the correct layouts and sizes:
100 atomRowData.resize(nAtomsInRow_);
101 atomRowData.setStorageLayout(storageLayout_);
102 atomColData.resize(nAtomsInCol_);
103 atomColData.setStorageLayout(storageLayout_);
104 cgRowData.resize(nGroupsInRow_);
105 cgRowData.setStorageLayout(DataStorage::dslPosition);
106 cgColData.resize(nGroupsInCol_);
107 cgColData.setStorageLayout(DataStorage::dslPosition);
108
109 identsRow.resize(nAtomsInRow_);
110 identsCol.resize(nAtomsInCol_);
111
112 AtomCommIntRow->gather(idents, identsRow);
113 AtomCommIntColumn->gather(idents, identsCol);
114
115 AtomCommIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
116 AtomCommIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);
117
118 cgCommIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
119 cgCommIntColumn->gather(cgLocalToGlobal, cgColToGlobal);
120
121 AtomCommRealRow->gather(massFactors, massFactorsRow);
122 AtomCommRealColumn->gather(massFactors, massFactorsCol);
123
124 groupListRow_.clear();
125 groupListRow_.resize(nGroupsInRow_);
126 for (int i = 0; i < nGroupsInRow_; i++) {
127 int gid = cgRowToGlobal[i];
128 for (int j = 0; j < nAtomsInRow_; j++) {
129 int aid = AtomRowToGlobal[j];
130 if (globalGroupMembership[aid] == gid)
131 groupListRow_[i].push_back(j);
132 }
133 }
134
135 groupListCol_.clear();
136 groupListCol_.resize(nGroupsInCol_);
137 for (int i = 0; i < nGroupsInCol_; i++) {
138 int gid = cgColToGlobal[i];
139 for (int j = 0; j < nAtomsInCol_; j++) {
140 int aid = AtomColToGlobal[j];
141 if (globalGroupMembership[aid] == gid)
142 groupListCol_[i].push_back(j);
143 }
144 }
145
146 excludesForAtom.clear();
147 excludesForAtom.resize(nAtomsInRow_);
148 toposForAtom.clear();
149 toposForAtom.resize(nAtomsInRow_);
150 topoDist.clear();
151 topoDist.resize(nAtomsInRow_);
152 for (int i = 0; i < nAtomsInRow_; i++) {
153 int iglob = AtomRowToGlobal[i];
154
155 for (int j = 0; j < nAtomsInCol_; j++) {
156 int jglob = AtomColToGlobal[j];
157
158 if (excludes->hasPair(iglob, jglob))
159 excludesForAtom[i].push_back(j);
160
161 if (oneTwo->hasPair(iglob, jglob)) {
162 toposForAtom[i].push_back(j);
163 topoDist[i].push_back(1);
164 } else {
165 if (oneThree->hasPair(iglob, jglob)) {
166 toposForAtom[i].push_back(j);
167 topoDist[i].push_back(2);
168 } else {
169 if (oneFour->hasPair(iglob, jglob)) {
170 toposForAtom[i].push_back(j);
171 topoDist[i].push_back(3);
172 }
173 }
174 }
175 }
176 }
177
178 #endif
179
180 groupList_.clear();
181 groupList_.resize(nGroups_);
182 for (int i = 0; i < nGroups_; i++) {
183 int gid = cgLocalToGlobal[i];
184 for (int j = 0; j < nLocal_; j++) {
185 int aid = AtomLocalToGlobal[j];
186 if (globalGroupMembership[aid] == gid) {
187 groupList_[i].push_back(j);
188 }
189 }
190 }
191
192 excludesForAtom.clear();
193 excludesForAtom.resize(nLocal_);
194 toposForAtom.clear();
195 toposForAtom.resize(nLocal_);
196 topoDist.clear();
197 topoDist.resize(nLocal_);
198
199 for (int i = 0; i < nLocal_; i++) {
200 int iglob = AtomLocalToGlobal[i];
201
202 for (int j = 0; j < nLocal_; j++) {
203 int jglob = AtomLocalToGlobal[j];
204
205 if (excludes->hasPair(iglob, jglob))
206 excludesForAtom[i].push_back(j);
207
208 if (oneTwo->hasPair(iglob, jglob)) {
209 toposForAtom[i].push_back(j);
210 topoDist[i].push_back(1);
211 } else {
212 if (oneThree->hasPair(iglob, jglob)) {
213 toposForAtom[i].push_back(j);
214 topoDist[i].push_back(2);
215 } else {
216 if (oneFour->hasPair(iglob, jglob)) {
217 toposForAtom[i].push_back(j);
218 topoDist[i].push_back(3);
219 }
220 }
221 }
222 }
223 }
224
225 createGtypeCutoffMap();
226
227 }
228
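/*
 * createGtypeCutoffMap assigns every cutoff group to a "group type"
 * (gtype) keyed by the largest atomic cutoff radius found in that
 * group, then precomputes, for every pair of group types, the cutoff
 * radius, its square, and the squared neighbor-listing radius
 * (cutoff + skin) according to the chosen cutoff policy.
 */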
229 void ForceMatrixDecomposition::createGtypeCutoffMap() {
230
231 RealType tol = 1e-6;
232 RealType rc;
233 int atid;
234 set<AtomType*> atypes = info_->getSimulatedAtomTypes();
235 map<int, RealType> atypeCutoff;
236
237 for (set<AtomType*>::iterator at = atypes.begin();
238 at != atypes.end(); ++at){
239 atid = (*at)->getIdent();
240 if (userChoseCutoff_)
241 atypeCutoff[atid] = userCutoff_;
242 else
243 atypeCutoff[atid] = interactionMan_->getSuggestedCutoffRadius(*at);
244 }
245
246 vector<RealType> gTypeCutoffs;
247 // first we do a single loop over the cutoff groups to find the
248 // largest cutoff for any atypes present in this group.
249 #ifdef IS_MPI
250 vector<RealType> groupCutoffRow(nGroupsInRow_, 0.0);
251 groupRowToGtype.resize(nGroupsInRow_);
252 for (int cg1 = 0; cg1 < nGroupsInRow_; cg1++) {
253 vector<int> atomListRow = getAtomsInGroupRow(cg1);
254 for (vector<int>::iterator ia = atomListRow.begin();
255 ia != atomListRow.end(); ++ia) {
256 int atom1 = (*ia);
257 atid = identsRow[atom1];
258 if (atypeCutoff[atid] > groupCutoffRow[cg1]) {
259 groupCutoffRow[cg1] = atypeCutoff[atid];
260 }
261 }
262
263 bool gTypeFound = false;
264 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
265 if (abs(groupCutoffRow[cg1] - gTypeCutoffs[gt]) < tol) {
266 groupRowToGtype[cg1] = gt;
267 gTypeFound = true;
268 }
269 }
270 if (!gTypeFound) {
271 gTypeCutoffs.push_back( groupCutoffRow[cg1] );
272 groupRowToGtype[cg1] = gTypeCutoffs.size() - 1;
273 }
274
275 }
276 vector<RealType> groupCutoffCol(nGroupsInCol_, 0.0);
277 groupColToGtype.resize(nGroupsInCol_);
278 for (int cg2 = 0; cg2 < nGroupsInCol_; cg2++) {
279 vector<int> atomListCol = getAtomsInGroupColumn(cg2);
280 for (vector<int>::iterator jb = atomListCol.begin();
281 jb != atomListCol.end(); ++jb) {
282 int atom2 = (*jb);
283 atid = identsCol[atom2];
284 if (atypeCutoff[atid] > groupCutoffCol[cg2]) {
285 groupCutoffCol[cg2] = atypeCutoff[atid];
286 }
287 }
288 bool gTypeFound = false;
289 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
290 if (abs(groupCutoffCol[cg2] - gTypeCutoffs[gt]) < tol) {
291 groupColToGtype[cg2] = gt;
292 gTypeFound = true;
293 }
294 }
295 if (!gTypeFound) {
296 gTypeCutoffs.push_back( groupCutoffCol[cg2] );
297 groupColToGtype[cg2] = gTypeCutoffs.size() - 1;
298 }
299 }
300 #else
301
302 vector<RealType> groupCutoff(nGroups_, 0.0);
303 groupToGtype.resize(nGroups_);
304 for (int cg1 = 0; cg1 < nGroups_; cg1++) {
305
306 groupCutoff[cg1] = 0.0;
307 vector<int> atomList = getAtomsInGroupRow(cg1);
308
309 for (vector<int>::iterator ia = atomList.begin();
310 ia != atomList.end(); ++ia) {
311 int atom1 = (*ia);
312 atid = idents[atom1];
313 if (atypeCutoff[atid] > groupCutoff[cg1]) {
314 groupCutoff[cg1] = atypeCutoff[atid];
315 }
316 }
317
318 bool gTypeFound = false;
319 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
320 if (abs(groupCutoff[cg1] - gTypeCutoffs[gt]) < tol) {
321 groupToGtype[cg1] = gt;
322 gTypeFound = true;
323 }
324 }
325 if (!gTypeFound) {
326 gTypeCutoffs.push_back( groupCutoff[cg1] );
327 groupToGtype[cg1] = gTypeCutoffs.size() - 1;
328 }
329 }
330 #endif
331
332 // Now we find the maximum group cutoff value present in the simulation
333
334 RealType groupMax = *max_element(gTypeCutoffs.begin(), gTypeCutoffs.end());
335
336 #ifdef IS_MPI
337 MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &groupMax, 1, MPI::REALTYPE, MPI::MAX);
338 #endif
339
340 RealType tradRcut = groupMax;
341
342 for (int i = 0; i < gTypeCutoffs.size(); i++) {
343 for (int j = 0; j < gTypeCutoffs.size(); j++) {
344 RealType thisRcut;
345 switch(cutoffPolicy_) {
346 case TRADITIONAL:
347 thisRcut = tradRcut;
348 break;
349 case MIX:
350 thisRcut = 0.5 * (gTypeCutoffs[i] + gTypeCutoffs[j]);
351 break;
352 case MAX:
353 thisRcut = max(gTypeCutoffs[i], gTypeCutoffs[j]);
354 break;
355 default:
356 sprintf(painCave.errMsg,
357 "ForceMatrixDecomposition::createGtypeCutoffMap "
358 "hit an unknown cutoff policy!\n");
359 painCave.severity = OPENMD_ERROR;
360 painCave.isFatal = 1;
361 simError();
362 break;
363 }
364
365 pair<int,int> key = make_pair(i,j);
366 gTypeCutoffMap[key].first = thisRcut;
367
368 if (thisRcut > largestRcut_) largestRcut_ = thisRcut;
369
370 gTypeCutoffMap[key].second = thisRcut*thisRcut;
371
372 gTypeCutoffMap[key].third = pow(thisRcut + skinThickness_, 2);
373
374 // sanity check
375
376 if (userChoseCutoff_) {
377 if (abs(gTypeCutoffMap[key].first - userCutoff_) > 0.0001) {
378 sprintf(painCave.errMsg,
379 "ForceMatrixDecomposition::createGtypeCutoffMap "
380 "user-specified rCut (%lf) does not match computed group Cutoff\n", userCutoff_);
381 painCave.severity = OPENMD_ERROR;
382 painCave.isFatal = 1;
383 simError();
384 }
385 }
386 }
387 }
388 }
389
390
391 groupCutoffs ForceMatrixDecomposition::getGroupCutoffs(int cg1, int cg2) {
392 int i, j;
393 #ifdef IS_MPI
394 i = groupRowToGtype[cg1];
395 j = groupColToGtype[cg2];
396 #else
397 i = groupToGtype[cg1];
398 j = groupToGtype[cg2];
399 #endif
400 return gTypeCutoffMap[make_pair(i,j)];
401 }
402
403 int ForceMatrixDecomposition::getTopologicalDistance(int atom1, int atom2) {
404 for (int j = 0; j < toposForAtom[atom1].size(); j++) {
405 if (toposForAtom[atom1][j] == atom2)
406 return topoDist[atom1][j];
407 }
408 return 0;
409 }
410
411 void ForceMatrixDecomposition::zeroWorkArrays() {
412 pairwisePot = 0.0;
413 embeddingPot = 0.0;
414
415 #ifdef IS_MPI
416 if (storageLayout_ & DataStorage::dslForce) {
417 fill(atomRowData.force.begin(), atomRowData.force.end(), V3Zero);
418 fill(atomColData.force.begin(), atomColData.force.end(), V3Zero);
419 }
420
421 if (storageLayout_ & DataStorage::dslTorque) {
422 fill(atomRowData.torque.begin(), atomRowData.torque.end(), V3Zero);
423 fill(atomColData.torque.begin(), atomColData.torque.end(), V3Zero);
424 }
425
426 fill(pot_row.begin(), pot_row.end(),
427 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
428
429 fill(pot_col.begin(), pot_col.end(),
430 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
431
432 if (storageLayout_ & DataStorage::dslParticlePot) {
433 fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(), 0.0);
434 fill(atomColData.particlePot.begin(), atomColData.particlePot.end(), 0.0);
435 }
436
437 if (storageLayout_ & DataStorage::dslDensity) {
438 fill(atomRowData.density.begin(), atomRowData.density.end(), 0.0);
439 fill(atomColData.density.begin(), atomColData.density.end(), 0.0);
440 }
441
442 if (storageLayout_ & DataStorage::dslFunctional) {
443 fill(atomRowData.functional.begin(), atomRowData.functional.end(), 0.0);
444 fill(atomColData.functional.begin(), atomColData.functional.end(), 0.0);
445 }
446
447 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
448 fill(atomRowData.functionalDerivative.begin(),
449 atomRowData.functionalDerivative.end(), 0.0);
450 fill(atomColData.functionalDerivative.begin(),
451 atomColData.functionalDerivative.end(), 0.0);
452 }
453
454 if (storageLayout_ & DataStorage::dslSkippedCharge) {
455 fill(atomRowData.skippedCharge.begin(),
456 atomRowData.skippedCharge.end(), 0.0);
457 fill(atomColData.skippedCharge.begin(),
458 atomColData.skippedCharge.end(), 0.0);
459 }
460
461 #else
462
463 if (storageLayout_ & DataStorage::dslParticlePot) {
464 fill(snap_->atomData.particlePot.begin(),
465 snap_->atomData.particlePot.end(), 0.0);
466 }
467
468 if (storageLayout_ & DataStorage::dslDensity) {
469 fill(snap_->atomData.density.begin(),
470 snap_->atomData.density.end(), 0.0);
471 }
472 if (storageLayout_ & DataStorage::dslFunctional) {
473 fill(snap_->atomData.functional.begin(),
474 snap_->atomData.functional.end(), 0.0);
475 }
476 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
477 fill(snap_->atomData.functionalDerivative.begin(),
478 snap_->atomData.functionalDerivative.end(), 0.0);
479 }
480 if (storageLayout_ & DataStorage::dslSkippedCharge) {
481 fill(snap_->atomData.skippedCharge.begin(),
482 snap_->atomData.skippedCharge.end(), 0.0);
483 }
484 #endif
485
486 }
487
488
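/*
 * distributeData pushes the current atomic and cutoff-group positions
 * (and, when the storage layout requires them, rotation matrices and
 * electrostatic frames) from the local snapshot out to the row- and
 * column-indexed arrays before the pair-interaction loop.
 */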
489 void ForceMatrixDecomposition::distributeData() {
490 snap_ = sman_->getCurrentSnapshot();
491 storageLayout_ = sman_->getStorageLayout();
492 #ifdef IS_MPI
493
494 // gather up the atomic positions
495 AtomCommVectorRow->gather(snap_->atomData.position,
496 atomRowData.position);
497 AtomCommVectorColumn->gather(snap_->atomData.position,
498 atomColData.position);
499
500 // gather up the cutoff group positions
501 cgCommVectorRow->gather(snap_->cgData.position,
502 cgRowData.position);
503 cgCommVectorColumn->gather(snap_->cgData.position,
504 cgColData.position);
505
506 // if needed, gather the atomic rotation matrices
507 if (storageLayout_ & DataStorage::dslAmat) {
508 AtomCommMatrixRow->gather(snap_->atomData.aMat,
509 atomRowData.aMat);
510 AtomCommMatrixColumn->gather(snap_->atomData.aMat,
511 atomColData.aMat);
512 }
513
514 // if needed, gather the atomic electrostatic frames
515 if (storageLayout_ & DataStorage::dslElectroFrame) {
516 AtomCommMatrixRow->gather(snap_->atomData.electroFrame,
517 atomRowData.electroFrame);
518 AtomCommMatrixColumn->gather(snap_->atomData.electroFrame,
519 atomColData.electroFrame);
520 }
521 #endif
522 }
523
524 /* collects information obtained during the pre-pair loop onto local
525 * data structures.
526 */
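// In parallel runs, the density accumulated along the row is scattered
// directly into the local array, and the column contribution is
// scattered into a temporary and summed in, so each local atom ends up
// with the full density from both halves of the force matrix.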
527 void ForceMatrixDecomposition::collectIntermediateData() {
528 snap_ = sman_->getCurrentSnapshot();
529 storageLayout_ = sman_->getStorageLayout();
530 #ifdef IS_MPI
531
532 if (storageLayout_ & DataStorage::dslDensity) {
533
534 AtomCommRealRow->scatter(atomRowData.density,
535 snap_->atomData.density);
536
537 int n = snap_->atomData.density.size();
538 vector<RealType> rho_tmp(n, 0.0);
539 AtomCommRealColumn->scatter(atomColData.density, rho_tmp);
540 for (int i = 0; i < n; i++)
541 snap_->atomData.density[i] += rho_tmp[i];
542 }
543 #endif
544 }
545
546 /*
547 * redistributes information obtained during the pre-pair loop out to
548 * row and column-indexed data structures
549 */
550 void ForceMatrixDecomposition::distributeIntermediateData() {
551 snap_ = sman_->getCurrentSnapshot();
552 storageLayout_ = sman_->getStorageLayout();
553 #ifdef IS_MPI
554 if (storageLayout_ & DataStorage::dslFunctional) {
555 AtomCommRealRow->gather(snap_->atomData.functional,
556 atomRowData.functional);
557 AtomCommRealColumn->gather(snap_->atomData.functional,
558 atomColData.functional);
559 }
560
561 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
562 AtomCommRealRow->gather(snap_->atomData.functionalDerivative,
563 atomRowData.functionalDerivative);
564 AtomCommRealColumn->gather(snap_->atomData.functionalDerivative,
565 atomColData.functionalDerivative);
566 }
567 #endif
568 }
569
570
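/*
 * collectData runs after the pair loop: the row and column
 * contributions to the forces (and, when present, torques and skipped
 * charges) are scattered back to their owning processors and
 * accumulated into the local snapshot, and the row- and
 * column-accumulated pair potentials are folded into pairwisePot.
 */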
571 void ForceMatrixDecomposition::collectData() {
572 snap_ = sman_->getCurrentSnapshot();
573 storageLayout_ = sman_->getStorageLayout();
574 #ifdef IS_MPI
575 int n = snap_->atomData.force.size();
576 vector<Vector3d> frc_tmp(n, V3Zero);
577
578 AtomCommVectorRow->scatter(atomRowData.force, frc_tmp);
579 for (int i = 0; i < n; i++) {
580 snap_->atomData.force[i] += frc_tmp[i];
581 frc_tmp[i] = 0.0;
582 }
583
584 AtomCommVectorColumn->scatter(atomColData.force, frc_tmp);
585 for (int i = 0; i < n; i++)
586 snap_->atomData.force[i] += frc_tmp[i];
587
588
589 if (storageLayout_ & DataStorage::dslTorque) {
590
591 int nt = snap_->atomData.torque.size();
592 vector<Vector3d> trq_tmp(nt, V3Zero);
593
594 AtomCommVectorRow->scatter(atomRowData.torque, trq_tmp);
595 for (int i = 0; i < nt; i++) {
596 snap_->atomData.torque[i] += trq_tmp[i];
597 trq_tmp[i] = 0.0;
598 }
599
600 AtomCommVectorColumn->scatter(atomColData.torque, trq_tmp);
601 for (int i = 0; i < nt; i++)
602 snap_->atomData.torque[i] += trq_tmp[i];
603 }
604
605 if (storageLayout_ & DataStorage::dslSkippedCharge) {
606
607 int ns = snap_->atomData.skippedCharge.size();
608 vector<RealType> skch_tmp(ns, 0.0);
609
610 AtomCommRealRow->scatter(atomRowData.skippedCharge, skch_tmp);
611 for (int i = 0; i < ns; i++) {
612 snap_->atomData.skippedCharge[i] = skch_tmp[i];
613 skch_tmp[i] = 0.0;
614 }
615
616 AtomCommRealColumn->scatter(atomColData.skippedCharge, skch_tmp);
617 for (int i = 0; i < ns; i++)
618 snap_->atomData.skippedCharge[i] += skch_tmp[i];
619 }
620
621 nLocal_ = snap_->getNumberOfAtoms();
622
623 vector<potVec> pot_temp(nLocal_,
624 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
625
626 // scatter/gather pot_row into the members of my column
627
628 AtomCommPotRow->scatter(pot_row, pot_temp);
629
630 for (int ii = 0; ii < pot_temp.size(); ii++ )
631 pairwisePot += pot_temp[ii];
632
633 fill(pot_temp.begin(), pot_temp.end(),
634 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
635
636 AtomCommPotColumn->scatter(pot_col, pot_temp);
637
638 for (int ii = 0; ii < pot_temp.size(); ii++ )
639 pairwisePot += pot_temp[ii];
640 #endif
641
642 }
643
644 int ForceMatrixDecomposition::getNAtomsInRow() {
645 #ifdef IS_MPI
646 return nAtomsInRow_;
647 #else
648 return nLocal_;
649 #endif
650 }
651
652 /**
653 * returns the list of atoms belonging to this group.
654 */
655 vector<int> ForceMatrixDecomposition::getAtomsInGroupRow(int cg1){
656 #ifdef IS_MPI
657 return groupListRow_[cg1];
658 #else
659 return groupList_[cg1];
660 #endif
661 }
662
663 vector<int> ForceMatrixDecomposition::getAtomsInGroupColumn(int cg2){
664 #ifdef IS_MPI
665 return groupListCol_[cg2];
666 #else
667 return groupList_[cg2];
668 #endif
669 }
670
671 Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1, int cg2){
672 Vector3d d;
673
674 #ifdef IS_MPI
675 d = cgColData.position[cg2] - cgRowData.position[cg1];
676 #else
677 d = snap_->cgData.position[cg2] - snap_->cgData.position[cg1];
678 #endif
679
680 snap_->wrapVector(d);
681 return d;
682 }
683
684
685 Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){
686
687 Vector3d d;
688
689 #ifdef IS_MPI
690 d = cgRowData.position[cg1] - atomRowData.position[atom1];
691 #else
692 d = snap_->cgData.position[cg1] - snap_->atomData.position[atom1];
693 #endif
694
695 snap_->wrapVector(d);
696 return d;
697 }
698
699 Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2, int cg2){
700 Vector3d d;
701
702 #ifdef IS_MPI
703 d = cgColData.position[cg2] - atomColData.position[atom2];
704 #else
705 d = snap_->cgData.position[cg2] - snap_->atomData.position[atom2];
706 #endif
707
708 snap_->wrapVector(d);
709 return d;
710 }
711
712 RealType ForceMatrixDecomposition::getMassFactorRow(int atom1) {
713 #ifdef IS_MPI
714 return massFactorsRow[atom1];
715 #else
716 return massFactors[atom1];
717 #endif
718 }
719
720 RealType ForceMatrixDecomposition::getMassFactorColumn(int atom2) {
721 #ifdef IS_MPI
722 return massFactorsCol[atom2];
723 #else
724 return massFactors[atom2];
725 #endif
726
727 }
728
729 Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1, int atom2){
730 Vector3d d;
731
732 #ifdef IS_MPI
733 d = atomColData.position[atom2] - atomRowData.position[atom1];
734 #else
735 d = snap_->atomData.position[atom2] - snap_->atomData.position[atom1];
736 #endif
737
738 snap_->wrapVector(d);
739 return d;
740 }
741
742 vector<int> ForceMatrixDecomposition::getExcludesForAtom(int atom1) {
743 return excludesForAtom[atom1];
744 }
745
746 /**
747 * We need to exclude some overcounted interactions that result from
748 * the parallel decomposition.
749 */
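// In the force-matrix decomposition, a given global pair (i, j) can show
// up on two processors: once with i in the row and j in the column, and
// once with the roles reversed.  The parity test on (id1 + id2) below
// keeps exactly one of those copies, alternating which side wins so the
// work stays roughly balanced.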
750 bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) {
751 int unique_id_1, unique_id_2;
752
753 #ifdef IS_MPI
754 // in MPI, we have to look up the unique IDs for each atom
755 unique_id_1 = AtomRowToGlobal[atom1];
756 unique_id_2 = AtomColToGlobal[atom2];
757
758 // this situation should only arise in MPI simulations
759 if (unique_id_1 == unique_id_2) return true;
760
761 // this prevents us from doing the pair on multiple processors
762 if (unique_id_1 < unique_id_2) {
763 if ((unique_id_1 + unique_id_2) % 2 == 0) return true;
764 } else {
765 if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
766 }
767 #endif
768 return false;
769 }
770
771 /**
772 * We need to handle the interactions for atoms that are involved in
773 * the same rigid body, as well as some short-range interactions
774 * (bonds, bends, torsions) differently from other interactions.
775 * We'll still visit the pairwise routines, but with a flag that
776 * tells those routines to exclude the pair from direct long range
777 * interactions. Some indirect interactions (notably reaction
778 * field) must still be handled for these pairs.
779 */
780 bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) {
781 int unique_id_2;
782
783 #ifdef IS_MPI
784 // in MPI, we have to look up the unique ID of the column atom.
785 unique_id_2 = AtomColToGlobal[atom2];
786 #else
787 // in the normal loop, the atom numbers are unique
788 unique_id_2 = atom2;
789 #endif
790
791 for (vector<int>::iterator i = excludesForAtom[atom1].begin();
792 i != excludesForAtom[atom1].end(); ++i) {
793 if ( (*i) == unique_id_2 ) return true;
794 }
795
796 return false;
797 }
798
799
800 void ForceMatrixDecomposition::addForceToAtomRow(int atom1, Vector3d fg){
801 #ifdef IS_MPI
802 atomRowData.force[atom1] += fg;
803 #else
804 snap_->atomData.force[atom1] += fg;
805 #endif
806 }
807
808 void ForceMatrixDecomposition::addForceToAtomColumn(int atom2, Vector3d fg){
809 #ifdef IS_MPI
810 atomColData.force[atom2] += fg;
811 #else
812 snap_->atomData.force[atom2] += fg;
813 #endif
814 }
815
816 // filling interaction blocks with pointers
817 void ForceMatrixDecomposition::fillInteractionData(InteractionData &idat,
818 int atom1, int atom2) {
819
820 idat.excluded = excludeAtomPair(atom1, atom2);
821
822 #ifdef IS_MPI
823
824 idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]),
825 ff_->getAtomType(identsCol[atom2]) );
826
827 if (storageLayout_ & DataStorage::dslAmat) {
828 idat.A1 = &(atomRowData.aMat[atom1]);
829 idat.A2 = &(atomColData.aMat[atom2]);
830 }
831
832 if (storageLayout_ & DataStorage::dslElectroFrame) {
833 idat.eFrame1 = &(atomRowData.electroFrame[atom1]);
834 idat.eFrame2 = &(atomColData.electroFrame[atom2]);
835 }
836
837 if (storageLayout_ & DataStorage::dslTorque) {
838 idat.t1 = &(atomRowData.torque[atom1]);
839 idat.t2 = &(atomColData.torque[atom2]);
840 }
841
842 if (storageLayout_ & DataStorage::dslDensity) {
843 idat.rho1 = &(atomRowData.density[atom1]);
844 idat.rho2 = &(atomColData.density[atom2]);
845 }
846
847 if (storageLayout_ & DataStorage::dslFunctional) {
848 idat.frho1 = &(atomRowData.functional[atom1]);
849 idat.frho2 = &(atomColData.functional[atom2]);
850 }
851
852 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
853 idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
854 idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
855 }
856
857 if (storageLayout_ & DataStorage::dslParticlePot) {
858 idat.particlePot1 = &(atomRowData.particlePot[atom1]);
859 idat.particlePot2 = &(atomColData.particlePot[atom2]);
860 }
861
862 if (storageLayout_ & DataStorage::dslSkippedCharge) {
863 idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
864 idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
865 }
866
867 #else
868
869 idat.atypes = make_pair( ff_->getAtomType(idents[atom1]),
870 ff_->getAtomType(idents[atom2]) );
871
872 if (storageLayout_ & DataStorage::dslAmat) {
873 idat.A1 = &(snap_->atomData.aMat[atom1]);
874 idat.A2 = &(snap_->atomData.aMat[atom2]);
875 }
876
877 if (storageLayout_ & DataStorage::dslElectroFrame) {
878 idat.eFrame1 = &(snap_->atomData.electroFrame[atom1]);
879 idat.eFrame2 = &(snap_->atomData.electroFrame[atom2]);
880 }
881
882 if (storageLayout_ & DataStorage::dslTorque) {
883 idat.t1 = &(snap_->atomData.torque[atom1]);
884 idat.t2 = &(snap_->atomData.torque[atom2]);
885 }
886
887 if (storageLayout_ & DataStorage::dslDensity) {
888 idat.rho1 = &(snap_->atomData.density[atom1]);
889 idat.rho2 = &(snap_->atomData.density[atom2]);
890 }
891
892 if (storageLayout_ & DataStorage::dslFunctional) {
893 idat.frho1 = &(snap_->atomData.functional[atom1]);
894 idat.frho2 = &(snap_->atomData.functional[atom2]);
895 }
896
897 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
898 idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
899 idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
900 }
901
902 if (storageLayout_ & DataStorage::dslParticlePot) {
903 idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
904 idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
905 }
906
907 if (storageLayout_ & DataStorage::dslSkippedCharge) {
908 idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
909 idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
910 }
911 #endif
912 }
913
914
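// unpackInteractionData applies Newton's third law (the force on atom2
// is the negative of f1) and books the pair potential.  In parallel
// runs the potential is split evenly between the row and column
// accumulators, which are both summed in collectData, so each pair is
// counted exactly once.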
915 void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) {
916 #ifdef IS_MPI
917 pot_row[atom1] += 0.5 * *(idat.pot);
918 pot_col[atom2] += 0.5 * *(idat.pot);
919
920 atomRowData.force[atom1] += *(idat.f1);
921 atomColData.force[atom2] -= *(idat.f1);
922 #else
923 pairwisePot += *(idat.pot);
924
925 snap_->atomData.force[atom1] += *(idat.f1);
926 snap_->atomData.force[atom2] -= *(idat.f1);
927 #endif
928
929 }
930
931 /*
932 * buildNeighborList
933 *
934 * first element of pair is row-indexed CutoffGroup
935 * second element of pair is column-indexed CutoffGroup
936 */
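// The neighbor list is built with a linked-cell algorithm: the box is
// divided into cells at least rList_ = largestRcut_ + skinThickness_
// wide, cutoff groups are binned by their scaled coordinates, and only
// groups in the same or neighboring cells (via cellOffsets_) are tested
// against the per-pair listing radius.  Boxes smaller than three cells
// in any direction fall back to an all-pairs search.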
937 vector<pair<int, int> > ForceMatrixDecomposition::buildNeighborList() {
938
939 vector<pair<int, int> > neighborList;
940 groupCutoffs cuts;
941 bool doAllPairs = false;
942
943 #ifdef IS_MPI
944 cellListRow_.clear();
945 cellListCol_.clear();
946 #else
947 cellList_.clear();
948 #endif
949
950 RealType rList_ = (largestRcut_ + skinThickness_);
951 RealType rl2 = rList_ * rList_;
952 Snapshot* snap_ = sman_->getCurrentSnapshot();
953 Mat3x3d Hmat = snap_->getHmat();
954 Vector3d Hx = Hmat.getColumn(0);
955 Vector3d Hy = Hmat.getColumn(1);
956 Vector3d Hz = Hmat.getColumn(2);
957
958 nCells_.x() = (int) ( Hx.length() / rList_ );
959 nCells_.y() = (int) ( Hy.length() / rList_ );
960 nCells_.z() = (int) ( Hz.length() / rList_ );
961
962 // handle small boxes where the cell offsets can end up repeating cells
963
964 if (nCells_.x() < 3) doAllPairs = true;
965 if (nCells_.y() < 3) doAllPairs = true;
966 if (nCells_.z() < 3) doAllPairs = true;
967
968 Mat3x3d invHmat = snap_->getInvHmat();
969 Vector3d rs, scaled, dr;
970 Vector3i whichCell;
971 int cellIndex;
972 int nCtot = nCells_.x() * nCells_.y() * nCells_.z();
973
974 #ifdef IS_MPI
975 cellListRow_.resize(nCtot);
976 cellListCol_.resize(nCtot);
977 #else
978 cellList_.resize(nCtot);
979 #endif
980
981 if (!doAllPairs) {
982 #ifdef IS_MPI
983
984 for (int i = 0; i < nGroupsInRow_; i++) {
985 rs = cgRowData.position[i];
986
987 // scaled positions relative to the box vectors
988 scaled = invHmat * rs;
989
990 // wrap the vector back into the unit box by subtracting integer box
991 // numbers
992 for (int j = 0; j < 3; j++) {
993 scaled[j] -= roundMe(scaled[j]);
994 scaled[j] += 0.5;
995 }
996
997 // find xyz-indices of cell that cutoffGroup is in.
998 whichCell.x() = nCells_.x() * scaled.x();
999 whichCell.y() = nCells_.y() * scaled.y();
1000 whichCell.z() = nCells_.z() * scaled.z();
1001
1002 // find single index of this cell:
1003 cellIndex = Vlinear(whichCell, nCells_);
1004
1005 // add this cutoff group to the list of groups in this cell;
1006 cellListRow_[cellIndex].push_back(i);
1007 }
1008
1009 for (int i = 0; i < nGroupsInCol_; i++) {
1010 rs = cgColData.position[i];
1011
1012 // scaled positions relative to the box vectors
1013 scaled = invHmat * rs;
1014
1015 // wrap the vector back into the unit box by subtracting integer box
1016 // numbers
1017 for (int j = 0; j < 3; j++) {
1018 scaled[j] -= roundMe(scaled[j]);
1019 scaled[j] += 0.5;
1020 }
1021
1022 // find xyz-indices of cell that cutoffGroup is in.
1023 whichCell.x() = nCells_.x() * scaled.x();
1024 whichCell.y() = nCells_.y() * scaled.y();
1025 whichCell.z() = nCells_.z() * scaled.z();
1026
1027 // find single index of this cell:
1028 cellIndex = Vlinear(whichCell, nCells_);
1029
1030 // add this cutoff group to the list of groups in this cell;
1031 cellListCol_[cellIndex].push_back(i);
1032 }
1033 #else
1034 for (int i = 0; i < nGroups_; i++) {
1035 rs = snap_->cgData.position[i];
1036
1037 // scaled positions relative to the box vectors
1038 scaled = invHmat * rs;
1039
1040 // wrap the vector back into the unit box by subtracting integer box
1041 // numbers
1042 for (int j = 0; j < 3; j++) {
1043 scaled[j] -= roundMe(scaled[j]);
1044 scaled[j] += 0.5;
1045 }
1046
1047 // find xyz-indices of cell that cutoffGroup is in.
1048 whichCell.x() = nCells_.x() * scaled.x();
1049 whichCell.y() = nCells_.y() * scaled.y();
1050 whichCell.z() = nCells_.z() * scaled.z();
1051
1052 // find single index of this cell:
1053 cellIndex = Vlinear(whichCell, nCells_);
1054
1055 // add this cutoff group to the list of groups in this cell;
1056 cellList_[cellIndex].push_back(i);
1057 }
1058 #endif
1059
1060 for (int m1z = 0; m1z < nCells_.z(); m1z++) {
1061 for (int m1y = 0; m1y < nCells_.y(); m1y++) {
1062 for (int m1x = 0; m1x < nCells_.x(); m1x++) {
1063 Vector3i m1v(m1x, m1y, m1z);
1064 int m1 = Vlinear(m1v, nCells_);
1065
1066 for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1067 os != cellOffsets_.end(); ++os) {
1068
1069 Vector3i m2v = m1v + (*os);
1070
1071 if (m2v.x() >= nCells_.x()) {
1072 m2v.x() = 0;
1073 } else if (m2v.x() < 0) {
1074 m2v.x() = nCells_.x() - 1;
1075 }
1076
1077 if (m2v.y() >= nCells_.y()) {
1078 m2v.y() = 0;
1079 } else if (m2v.y() < 0) {
1080 m2v.y() = nCells_.y() - 1;
1081 }
1082
1083 if (m2v.z() >= nCells_.z()) {
1084 m2v.z() = 0;
1085 } else if (m2v.z() < 0) {
1086 m2v.z() = nCells_.z() - 1;
1087 }
1088
1089 int m2 = Vlinear (m2v, nCells_);
1090
1091 #ifdef IS_MPI
1092 for (vector<int>::iterator j1 = cellListRow_[m1].begin();
1093 j1 != cellListRow_[m1].end(); ++j1) {
1094 for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1095 j2 != cellListCol_[m2].end(); ++j2) {
1096
1097 // Always do this if we're in different cells or if
1098 // we're in the same cell and the global index of the
1099 // j2 cutoff group is less than the j1 cutoff group
1100
1101 if (m2 != m1 || cgColToGlobal[(*j2)] < cgRowToGlobal[(*j1)]) {
1102 dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)];
1103 snap_->wrapVector(dr);
1104 cuts = getGroupCutoffs( (*j1), (*j2) );
1105 if (dr.lengthSquare() < cuts.third) {
1106 neighborList.push_back(make_pair((*j1), (*j2)));
1107 }
1108 }
1109 }
1110 }
1111 #else
1112
1113 for (vector<int>::iterator j1 = cellList_[m1].begin();
1114 j1 != cellList_[m1].end(); ++j1) {
1115 for (vector<int>::iterator j2 = cellList_[m2].begin();
1116 j2 != cellList_[m2].end(); ++j2) {
1117
1118 // Always do this if we're in different cells or if
1119 // we're in the same cell and the global index of the
1120 // j2 cutoff group is less than the j1 cutoff group
1121
1122 if (m2 != m1 || (*j2) < (*j1)) {
1123 dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
1124 snap_->wrapVector(dr);
1125 cuts = getGroupCutoffs( (*j1), (*j2) );
1126 if (dr.lengthSquare() < cuts.third) {
1127 neighborList.push_back(make_pair((*j1), (*j2)));
1128 }
1129 }
1130 }
1131 }
1132 #endif
1133 }
1134 }
1135 }
1136 }
1137 } else {
1138 // branch to do all cutoff group pairs
1139 #ifdef IS_MPI
1140 for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1141 for (int j2 = 0; j2 < nGroupsInCol_; j2++) {
1142 dr = cgColData.position[j2] - cgRowData.position[j1];
1143 snap_->wrapVector(dr);
1144 cuts = getGroupCutoffs( j1, j2 );
1145 if (dr.lengthSquare() < cuts.third) {
1146 neighborList.push_back(make_pair(j1, j2));
1147 }
1148 }
1149 }
1150 #else
1151 for (int j1 = 0; j1 < nGroups_ - 1; j1++) {
1152 for (int j2 = j1 + 1; j2 < nGroups_; j2++) {
1153 dr = snap_->cgData.position[j2] - snap_->cgData.position[j1];
1154 snap_->wrapVector(dr);
1155 cuts = getGroupCutoffs( j1, j2 );
1156 if (dr.lengthSquare() < cuts.third) {
1157 neighborList.push_back(make_pair(j1, j2));
1158 }
1159 }
1160 }
1161 #endif
1162 }
1163
1164 // save the local cutoff group positions for the check that is
1165 // done on each loop:
1166 saved_CG_positions_.clear();
1167 for (int i = 0; i < nGroups_; i++)
1168 saved_CG_positions_.push_back(snap_->cgData.position[i]);
1169
1170 return neighborList;
1171 }
1172 } //end namespace OpenMD