ViewVC Help
View File | Revision Log | Show Annotations | View Changeset | Root Listing
root/OpenMD/trunk/src/parallel/ForceMatrixDecomposition.cpp
Revision: 1688
Committed: Wed Mar 14 17:56:01 2012 UTC (13 years, 1 month ago) by gezelter
Original Path: branches/development/src/parallel/ForceMatrixDecomposition.cpp
File size: 42598 byte(s)
Log Message:
Bug fixes for GB.  Now using strength parameter mixing ideas from Wu
et al. [J. Chem. Phys. 135, 155104 (2011)].  This helps get the
dissimilar particle mixing behavior to be the same whichever order the
two particles come in.  This does require that the force field file
specify explicitly the values for epsilon in the cross (X), side-by-side (S), 
and end-to-end (E) configurations.


File Contents

# Content
1 /*
2 * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
3 *
4 * The University of Notre Dame grants you ("Licensee") a
5 * non-exclusive, royalty free, license to use, modify and
6 * redistribute this software in source and binary code form, provided
7 * that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the
15 * distribution.
16 *
17 * This software is provided "AS IS," without a warranty of any
18 * kind. All express or implied conditions, representations and
19 * warranties, including any implied warranty of merchantability,
20 * fitness for a particular purpose or non-infringement, are hereby
21 * excluded. The University of Notre Dame and its licensors shall not
22 * be liable for any damages suffered by licensee as a result of
23 * using, modifying or distributing the software or its
24 * derivatives. In no event will the University of Notre Dame or its
25 * licensors be liable for any lost revenue, profit or data, or for
26 * direct, indirect, special, consequential, incidental or punitive
27 * damages, however caused and regardless of the theory of liability,
28 * arising out of the use of or inability to use software, even if the
29 * University of Notre Dame has been advised of the possibility of
30 * such damages.
31 *
32 * SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
33 * research, please cite the appropriate papers when you publish your
34 * work. Good starting points are:
35 *
36 * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
37 * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
38 * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
39 * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010).
40 * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
41 */
42 #include "parallel/ForceMatrixDecomposition.hpp"
43 #include "math/SquareMatrix3.hpp"
44 #include "nonbonded/NonBondedInteraction.hpp"
45 #include "brains/SnapshotManager.hpp"
46 #include "brains/PairList.hpp"
47
48 using namespace std;
49 namespace OpenMD {
50
51 ForceMatrixDecomposition::ForceMatrixDecomposition(SimInfo* info, InteractionManager* iMan) : ForceDecomposition(info, iMan) {
52
53 // In a parallel computation, row and colum scans must visit all
54 // surrounding cells (not just the 14 upper triangular blocks that
55 // are used when the processor can see all pairs)
56 #ifdef IS_MPI
57 cellOffsets_.clear();
58 cellOffsets_.push_back( Vector3i(-1,-1,-1) );
59 cellOffsets_.push_back( Vector3i( 0,-1,-1) );
60 cellOffsets_.push_back( Vector3i( 1,-1,-1) );
61 cellOffsets_.push_back( Vector3i(-1, 0,-1) );
62 cellOffsets_.push_back( Vector3i( 0, 0,-1) );
63 cellOffsets_.push_back( Vector3i( 1, 0,-1) );
64 cellOffsets_.push_back( Vector3i(-1, 1,-1) );
65 cellOffsets_.push_back( Vector3i( 0, 1,-1) );
66 cellOffsets_.push_back( Vector3i( 1, 1,-1) );
67 cellOffsets_.push_back( Vector3i(-1,-1, 0) );
68 cellOffsets_.push_back( Vector3i( 0,-1, 0) );
69 cellOffsets_.push_back( Vector3i( 1,-1, 0) );
70 cellOffsets_.push_back( Vector3i(-1, 0, 0) );
71 cellOffsets_.push_back( Vector3i( 0, 0, 0) );
72 cellOffsets_.push_back( Vector3i( 1, 0, 0) );
73 cellOffsets_.push_back( Vector3i(-1, 1, 0) );
74 cellOffsets_.push_back( Vector3i( 0, 1, 0) );
75 cellOffsets_.push_back( Vector3i( 1, 1, 0) );
76 cellOffsets_.push_back( Vector3i(-1,-1, 1) );
77 cellOffsets_.push_back( Vector3i( 0,-1, 1) );
78 cellOffsets_.push_back( Vector3i( 1,-1, 1) );
79 cellOffsets_.push_back( Vector3i(-1, 0, 1) );
80 cellOffsets_.push_back( Vector3i( 0, 0, 1) );
81 cellOffsets_.push_back( Vector3i( 1, 0, 1) );
82 cellOffsets_.push_back( Vector3i(-1, 1, 1) );
83 cellOffsets_.push_back( Vector3i( 0, 1, 1) );
84 cellOffsets_.push_back( Vector3i( 1, 1, 1) );
85 #endif
86 }
87
88
  /**
   * distributeInitialData is essentially a copy of the older fortran
   * SimulationSetup
   *
   * Caches per-atom identity, mass-factor, and cutoff-group
   * membership data from SimInfo; under MPI it additionally builds
   * the row / column communication plans and gathers the same data
   * into row- and column-indexed arrays used by the force-matrix
   * decomposition.  Finishes by building the group-type cutoff map.
   */
  void ForceMatrixDecomposition::distributeInitialData() {
    snap_ = sman_->getCurrentSnapshot();
    storageLayout_ = sman_->getStorageLayout();
    ff_ = info_->getForceField();
    nLocal_ = snap_->getNumberOfAtoms();

    nGroups_ = info_->getNLocalCutoffGroups();
    // gather the information for atomtype IDs (atids):
    idents = info_->getIdentArray();
    AtomLocalToGlobal = info_->getGlobalAtomIndices();
    cgLocalToGlobal = info_->getGlobalGroupIndices();
    vector<int> globalGroupMembership = info_->getGlobalGroupMembership();

    massFactors = info_->getMassFactors();

    // pair lists used below to mark excluded and bonded (1-2, 1-3,
    // 1-4) interactions; keyed on global atom indices:
    PairList* excludes = info_->getExcludedInteractions();
    PairList* oneTwo = info_->getOneTwoInteractions();
    PairList* oneThree = info_->getOneThreeInteractions();
    PairList* oneFour = info_->getOneFourInteractions();

#ifdef IS_MPI

    MPI::Intracomm row = rowComm.getComm();
    MPI::Intracomm col = colComm.getComm();

    // One communication plan per (communicator, element type) pair
    // that must move between local and row/column-indexed storage:
    AtomPlanIntRow = new Plan<int>(row, nLocal_);
    AtomPlanRealRow = new Plan<RealType>(row, nLocal_);
    AtomPlanVectorRow = new Plan<Vector3d>(row, nLocal_);
    AtomPlanMatrixRow = new Plan<Mat3x3d>(row, nLocal_);
    AtomPlanPotRow = new Plan<potVec>(row, nLocal_);

    AtomPlanIntColumn = new Plan<int>(col, nLocal_);
    AtomPlanRealColumn = new Plan<RealType>(col, nLocal_);
    AtomPlanVectorColumn = new Plan<Vector3d>(col, nLocal_);
    AtomPlanMatrixColumn = new Plan<Mat3x3d>(col, nLocal_);
    AtomPlanPotColumn = new Plan<potVec>(col, nLocal_);

    cgPlanIntRow = new Plan<int>(row, nGroups_);
    cgPlanVectorRow = new Plan<Vector3d>(row, nGroups_);
    cgPlanIntColumn = new Plan<int>(col, nGroups_);
    cgPlanVectorColumn = new Plan<Vector3d>(col, nGroups_);

    nAtomsInRow_ = AtomPlanIntRow->getSize();
    nAtomsInCol_ = AtomPlanIntColumn->getSize();
    nGroupsInRow_ = cgPlanIntRow->getSize();
    nGroupsInCol_ = cgPlanIntColumn->getSize();

    // Modify the data storage objects with the correct layouts and sizes:
    atomRowData.resize(nAtomsInRow_);
    atomRowData.setStorageLayout(storageLayout_);
    atomColData.resize(nAtomsInCol_);
    atomColData.setStorageLayout(storageLayout_);
    cgRowData.resize(nGroupsInRow_);
    cgRowData.setStorageLayout(DataStorage::dslPosition);
    cgColData.resize(nGroupsInCol_);
    cgColData.setStorageLayout(DataStorage::dslPosition);

    identsRow.resize(nAtomsInRow_);
    identsCol.resize(nAtomsInCol_);

    // replicate the atom-type idents into row/column ordering:
    AtomPlanIntRow->gather(idents, identsRow);
    AtomPlanIntColumn->gather(idents, identsCol);

    // allocate memory for the parallel objects
    atypesRow.resize(nAtomsInRow_);
    atypesCol.resize(nAtomsInCol_);

    for (int i = 0; i < nAtomsInRow_; i++)
      atypesRow[i] = ff_->getAtomType(identsRow[i]);
    for (int i = 0; i < nAtomsInCol_; i++)
      atypesCol[i] = ff_->getAtomType(identsCol[i]);

    pot_row.resize(nAtomsInRow_);
    pot_col.resize(nAtomsInCol_);

    AtomRowToGlobal.resize(nAtomsInRow_);
    AtomColToGlobal.resize(nAtomsInCol_);
    AtomPlanIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
    AtomPlanIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);

    cgRowToGlobal.resize(nGroupsInRow_);
    cgColToGlobal.resize(nGroupsInCol_);
    cgPlanIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
    cgPlanIntColumn->gather(cgLocalToGlobal, cgColToGlobal);

    massFactorsRow.resize(nAtomsInRow_);
    massFactorsCol.resize(nAtomsInCol_);
    AtomPlanRealRow->gather(massFactors, massFactorsRow);
    AtomPlanRealColumn->gather(massFactors, massFactorsCol);

    // Invert the global group membership to get, for each row-indexed
    // cutoff group, the list of row-indexed atoms it contains:
    groupListRow_.clear();
    groupListRow_.resize(nGroupsInRow_);
    for (int i = 0; i < nGroupsInRow_; i++) {
      int gid = cgRowToGlobal[i];
      for (int j = 0; j < nAtomsInRow_; j++) {
        int aid = AtomRowToGlobal[j];
        if (globalGroupMembership[aid] == gid)
          groupListRow_[i].push_back(j);
      }
    }

    // ... and the same for the column-indexed groups:
    groupListCol_.clear();
    groupListCol_.resize(nGroupsInCol_);
    for (int i = 0; i < nGroupsInCol_; i++) {
      int gid = cgColToGlobal[i];
      for (int j = 0; j < nAtomsInCol_; j++) {
        int aid = AtomColToGlobal[j];
        if (globalGroupMembership[aid] == gid)
          groupListCol_[i].push_back(j);
      }
    }

    // Record, per row atom, which column atoms are excluded or
    // topologically close.  topoDist entries parallel toposForAtom
    // entries: 1 for 1-2 pairs, 2 for 1-3 pairs, 3 for 1-4 pairs.
    excludesForAtom.clear();
    excludesForAtom.resize(nAtomsInRow_);
    toposForAtom.clear();
    toposForAtom.resize(nAtomsInRow_);
    topoDist.clear();
    topoDist.resize(nAtomsInRow_);
    for (int i = 0; i < nAtomsInRow_; i++) {
      int iglob = AtomRowToGlobal[i];

      for (int j = 0; j < nAtomsInCol_; j++) {
        int jglob = AtomColToGlobal[j];

        if (excludes->hasPair(iglob, jglob))
          excludesForAtom[i].push_back(j);

        if (oneTwo->hasPair(iglob, jglob)) {
          toposForAtom[i].push_back(j);
          topoDist[i].push_back(1);
        } else {
          if (oneThree->hasPair(iglob, jglob)) {
            toposForAtom[i].push_back(j);
            topoDist[i].push_back(2);
          } else {
            if (oneFour->hasPair(iglob, jglob)) {
              toposForAtom[i].push_back(j);
              topoDist[i].push_back(3);
            }
          }
        }
      }
    }

#else
    // Serial build of the same exclusion / topological-distance
    // bookkeeping, indexed by local atom IDs instead of row/column:
    excludesForAtom.clear();
    excludesForAtom.resize(nLocal_);
    toposForAtom.clear();
    toposForAtom.resize(nLocal_);
    topoDist.clear();
    topoDist.resize(nLocal_);

    for (int i = 0; i < nLocal_; i++) {
      int iglob = AtomLocalToGlobal[i];

      for (int j = 0; j < nLocal_; j++) {
        int jglob = AtomLocalToGlobal[j];

        if (excludes->hasPair(iglob, jglob))
          excludesForAtom[i].push_back(j);

        if (oneTwo->hasPair(iglob, jglob)) {
          toposForAtom[i].push_back(j);
          topoDist[i].push_back(1);
        } else {
          if (oneThree->hasPair(iglob, jglob)) {
            toposForAtom[i].push_back(j);
            topoDist[i].push_back(2);
          } else {
            if (oneFour->hasPair(iglob, jglob)) {
              toposForAtom[i].push_back(j);
              topoDist[i].push_back(3);
            }
          }
        }
      }
    }
#endif

    // allocate memory for the parallel objects
    atypesLocal.resize(nLocal_);

    for (int i = 0; i < nLocal_; i++)
      atypesLocal[i] = ff_->getAtomType(idents[i]);

    // Local cutoff-group membership lists (used in both serial and
    // parallel runs):
    groupList_.clear();
    groupList_.resize(nGroups_);
    for (int i = 0; i < nGroups_; i++) {
      int gid = cgLocalToGlobal[i];
      for (int j = 0; j < nLocal_; j++) {
        int aid = AtomLocalToGlobal[j];
        if (globalGroupMembership[aid] == gid) {
          groupList_[i].push_back(j);
        }
      }
    }


    createGtypeCutoffMap();

  }
294
295 void ForceMatrixDecomposition::createGtypeCutoffMap() {
296
297 RealType tol = 1e-6;
298 largestRcut_ = 0.0;
299 RealType rc;
300 int atid;
301 set<AtomType*> atypes = info_->getSimulatedAtomTypes();
302
303 map<int, RealType> atypeCutoff;
304
305 for (set<AtomType*>::iterator at = atypes.begin();
306 at != atypes.end(); ++at){
307 atid = (*at)->getIdent();
308 if (userChoseCutoff_)
309 atypeCutoff[atid] = userCutoff_;
310 else
311 atypeCutoff[atid] = interactionMan_->getSuggestedCutoffRadius(*at);
312 }
313
314 vector<RealType> gTypeCutoffs;
315 // first we do a single loop over the cutoff groups to find the
316 // largest cutoff for any atypes present in this group.
317 #ifdef IS_MPI
318 vector<RealType> groupCutoffRow(nGroupsInRow_, 0.0);
319 groupRowToGtype.resize(nGroupsInRow_);
320 for (int cg1 = 0; cg1 < nGroupsInRow_; cg1++) {
321 vector<int> atomListRow = getAtomsInGroupRow(cg1);
322 for (vector<int>::iterator ia = atomListRow.begin();
323 ia != atomListRow.end(); ++ia) {
324 int atom1 = (*ia);
325 atid = identsRow[atom1];
326 if (atypeCutoff[atid] > groupCutoffRow[cg1]) {
327 groupCutoffRow[cg1] = atypeCutoff[atid];
328 }
329 }
330
331 bool gTypeFound = false;
332 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
333 if (abs(groupCutoffRow[cg1] - gTypeCutoffs[gt]) < tol) {
334 groupRowToGtype[cg1] = gt;
335 gTypeFound = true;
336 }
337 }
338 if (!gTypeFound) {
339 gTypeCutoffs.push_back( groupCutoffRow[cg1] );
340 groupRowToGtype[cg1] = gTypeCutoffs.size() - 1;
341 }
342
343 }
344 vector<RealType> groupCutoffCol(nGroupsInCol_, 0.0);
345 groupColToGtype.resize(nGroupsInCol_);
346 for (int cg2 = 0; cg2 < nGroupsInCol_; cg2++) {
347 vector<int> atomListCol = getAtomsInGroupColumn(cg2);
348 for (vector<int>::iterator jb = atomListCol.begin();
349 jb != atomListCol.end(); ++jb) {
350 int atom2 = (*jb);
351 atid = identsCol[atom2];
352 if (atypeCutoff[atid] > groupCutoffCol[cg2]) {
353 groupCutoffCol[cg2] = atypeCutoff[atid];
354 }
355 }
356 bool gTypeFound = false;
357 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
358 if (abs(groupCutoffCol[cg2] - gTypeCutoffs[gt]) < tol) {
359 groupColToGtype[cg2] = gt;
360 gTypeFound = true;
361 }
362 }
363 if (!gTypeFound) {
364 gTypeCutoffs.push_back( groupCutoffCol[cg2] );
365 groupColToGtype[cg2] = gTypeCutoffs.size() - 1;
366 }
367 }
368 #else
369
370 vector<RealType> groupCutoff(nGroups_, 0.0);
371 groupToGtype.resize(nGroups_);
372 for (int cg1 = 0; cg1 < nGroups_; cg1++) {
373 groupCutoff[cg1] = 0.0;
374 vector<int> atomList = getAtomsInGroupRow(cg1);
375 for (vector<int>::iterator ia = atomList.begin();
376 ia != atomList.end(); ++ia) {
377 int atom1 = (*ia);
378 atid = idents[atom1];
379 if (atypeCutoff[atid] > groupCutoff[cg1])
380 groupCutoff[cg1] = atypeCutoff[atid];
381 }
382
383 bool gTypeFound = false;
384 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
385 if (abs(groupCutoff[cg1] - gTypeCutoffs[gt]) < tol) {
386 groupToGtype[cg1] = gt;
387 gTypeFound = true;
388 }
389 }
390 if (!gTypeFound) {
391 gTypeCutoffs.push_back( groupCutoff[cg1] );
392 groupToGtype[cg1] = gTypeCutoffs.size() - 1;
393 }
394 }
395 #endif
396
397 // Now we find the maximum group cutoff value present in the simulation
398
399 RealType groupMax = *max_element(gTypeCutoffs.begin(),
400 gTypeCutoffs.end());
401
402 #ifdef IS_MPI
403 MPI::COMM_WORLD.Allreduce(&groupMax, &groupMax, 1, MPI::REALTYPE,
404 MPI::MAX);
405 #endif
406
407 RealType tradRcut = groupMax;
408
409 for (int i = 0; i < gTypeCutoffs.size(); i++) {
410 for (int j = 0; j < gTypeCutoffs.size(); j++) {
411 RealType thisRcut;
412 switch(cutoffPolicy_) {
413 case TRADITIONAL:
414 thisRcut = tradRcut;
415 break;
416 case MIX:
417 thisRcut = 0.5 * (gTypeCutoffs[i] + gTypeCutoffs[j]);
418 break;
419 case MAX:
420 thisRcut = max(gTypeCutoffs[i], gTypeCutoffs[j]);
421 break;
422 default:
423 sprintf(painCave.errMsg,
424 "ForceMatrixDecomposition::createGtypeCutoffMap "
425 "hit an unknown cutoff policy!\n");
426 painCave.severity = OPENMD_ERROR;
427 painCave.isFatal = 1;
428 simError();
429 break;
430 }
431
432 pair<int,int> key = make_pair(i,j);
433 gTypeCutoffMap[key].first = thisRcut;
434 if (thisRcut > largestRcut_) largestRcut_ = thisRcut;
435 gTypeCutoffMap[key].second = thisRcut*thisRcut;
436 gTypeCutoffMap[key].third = pow(thisRcut + skinThickness_, 2);
437 // sanity check
438
439 if (userChoseCutoff_) {
440 if (abs(gTypeCutoffMap[key].first - userCutoff_) > 0.0001) {
441 sprintf(painCave.errMsg,
442 "ForceMatrixDecomposition::createGtypeCutoffMap "
443 "user-specified rCut (%lf) does not match computed group Cutoff\n", userCutoff_);
444 painCave.severity = OPENMD_ERROR;
445 painCave.isFatal = 1;
446 simError();
447 }
448 }
449 }
450 }
451 }
452
453
454 groupCutoffs ForceMatrixDecomposition::getGroupCutoffs(int cg1, int cg2) {
455 int i, j;
456 #ifdef IS_MPI
457 i = groupRowToGtype[cg1];
458 j = groupColToGtype[cg2];
459 #else
460 i = groupToGtype[cg1];
461 j = groupToGtype[cg2];
462 #endif
463 return gTypeCutoffMap[make_pair(i,j)];
464 }
465
466 int ForceMatrixDecomposition::getTopologicalDistance(int atom1, int atom2) {
467 for (int j = 0; j < toposForAtom[atom1].size(); j++) {
468 if (toposForAtom[atom1][j] == atom2)
469 return topoDist[atom1][j];
470 }
471 return 0;
472 }
473
  /**
   * Zeroes all per-step accumulators: the scalar potential totals,
   * the row/column communication buffers (MPI only), and the local
   * snapshot arrays.  Only arrays enabled in storageLayout_ are
   * touched.
   */
  void ForceMatrixDecomposition::zeroWorkArrays() {
    pairwisePot = 0.0;
    embeddingPot = 0.0;

#ifdef IS_MPI
    if (storageLayout_ & DataStorage::dslForce) {
      fill(atomRowData.force.begin(), atomRowData.force.end(), V3Zero);
      fill(atomColData.force.begin(), atomColData.force.end(), V3Zero);
    }

    if (storageLayout_ & DataStorage::dslTorque) {
      fill(atomRowData.torque.begin(), atomRowData.torque.end(), V3Zero);
      fill(atomColData.torque.begin(), atomColData.torque.end(), V3Zero);
    }

    // per-atom potential accumulators are always present:
    fill(pot_row.begin(), pot_row.end(),
         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));

    fill(pot_col.begin(), pot_col.end(),
         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));

    if (storageLayout_ & DataStorage::dslParticlePot) {
      fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(),
           0.0);
      fill(atomColData.particlePot.begin(), atomColData.particlePot.end(),
           0.0);
    }

    if (storageLayout_ & DataStorage::dslDensity) {
      fill(atomRowData.density.begin(), atomRowData.density.end(), 0.0);
      fill(atomColData.density.begin(), atomColData.density.end(), 0.0);
    }

    if (storageLayout_ & DataStorage::dslFunctional) {
      fill(atomRowData.functional.begin(), atomRowData.functional.end(),
           0.0);
      fill(atomColData.functional.begin(), atomColData.functional.end(),
           0.0);
    }

    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
      fill(atomRowData.functionalDerivative.begin(),
           atomRowData.functionalDerivative.end(), 0.0);
      fill(atomColData.functionalDerivative.begin(),
           atomColData.functionalDerivative.end(), 0.0);
    }

    if (storageLayout_ & DataStorage::dslSkippedCharge) {
      fill(atomRowData.skippedCharge.begin(),
           atomRowData.skippedCharge.end(), 0.0);
      fill(atomColData.skippedCharge.begin(),
           atomColData.skippedCharge.end(), 0.0);
    }

#endif
    // even in parallel, we need to zero out the local arrays:

    if (storageLayout_ & DataStorage::dslParticlePot) {
      fill(snap_->atomData.particlePot.begin(),
           snap_->atomData.particlePot.end(), 0.0);
    }

    if (storageLayout_ & DataStorage::dslDensity) {
      fill(snap_->atomData.density.begin(),
           snap_->atomData.density.end(), 0.0);
    }
    if (storageLayout_ & DataStorage::dslFunctional) {
      fill(snap_->atomData.functional.begin(),
           snap_->atomData.functional.end(), 0.0);
    }
    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
      fill(snap_->atomData.functionalDerivative.begin(),
           snap_->atomData.functionalDerivative.end(), 0.0);
    }
    if (storageLayout_ & DataStorage::dslSkippedCharge) {
      fill(snap_->atomData.skippedCharge.begin(),
           snap_->atomData.skippedCharge.end(), 0.0);
    }

  }
554
555
  /**
   * Distributes per-step input data (atomic and cutoff-group
   * positions, and — when enabled in the storage layout — rotation
   * matrices and electrostatic frames) from local snapshot storage
   * into the row- and column-indexed buffers.  A no-op without MPI.
   */
  void ForceMatrixDecomposition::distributeData() {
    snap_ = sman_->getCurrentSnapshot();
    storageLayout_ = sman_->getStorageLayout();
#ifdef IS_MPI

    // gather up the atomic positions
    AtomPlanVectorRow->gather(snap_->atomData.position,
                              atomRowData.position);
    AtomPlanVectorColumn->gather(snap_->atomData.position,
                                 atomColData.position);

    // gather up the cutoff group positions

    cgPlanVectorRow->gather(snap_->cgData.position,
                            cgRowData.position);

    cgPlanVectorColumn->gather(snap_->cgData.position,
                               cgColData.position);


    // if needed, gather the atomic rotation matrices
    if (storageLayout_ & DataStorage::dslAmat) {
      AtomPlanMatrixRow->gather(snap_->atomData.aMat,
                                atomRowData.aMat);
      AtomPlanMatrixColumn->gather(snap_->atomData.aMat,
                                   atomColData.aMat);
    }

    // if needed, gather the atomic eletrostatic frames
    if (storageLayout_ & DataStorage::dslElectroFrame) {
      AtomPlanMatrixRow->gather(snap_->atomData.electroFrame,
                                atomRowData.electroFrame);
      AtomPlanMatrixColumn->gather(snap_->atomData.electroFrame,
                                   atomColData.electroFrame);
    }

#endif
  }
594
  /* collects information obtained during the pre-pair loop onto local
   * data structures.
   *
   * Under MPI, the row-scattered densities land directly in the local
   * array, and the column-scattered densities are accumulated on top
   * of them, so each local atom ends up with its full density.
   */
  void ForceMatrixDecomposition::collectIntermediateData() {
    snap_ = sman_->getCurrentSnapshot();
    storageLayout_ = sman_->getStorageLayout();
#ifdef IS_MPI

    if (storageLayout_ & DataStorage::dslDensity) {

      AtomPlanRealRow->scatter(atomRowData.density,
                               snap_->atomData.density);

      // column contributions go through a temporary so they can be
      // summed into (rather than overwrite) the local densities:
      int n = snap_->atomData.density.size();
      vector<RealType> rho_tmp(n, 0.0);
      AtomPlanRealColumn->scatter(atomColData.density, rho_tmp);
      for (int i = 0; i < n; i++)
        snap_->atomData.density[i] += rho_tmp[i];
    }
#endif
  }
616
  /*
   * redistributes information obtained during the pre-pair loop out to
   * row and column-indexed data structures
   *
   * (the embedding functional and its derivative, when present in the
   * storage layout).  A no-op without MPI.
   */
  void ForceMatrixDecomposition::distributeIntermediateData() {
    snap_ = sman_->getCurrentSnapshot();
    storageLayout_ = sman_->getStorageLayout();
#ifdef IS_MPI
    if (storageLayout_ & DataStorage::dslFunctional) {
      AtomPlanRealRow->gather(snap_->atomData.functional,
                              atomRowData.functional);
      AtomPlanRealColumn->gather(snap_->atomData.functional,
                                 atomColData.functional);
    }

    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
      AtomPlanRealRow->gather(snap_->atomData.functionalDerivative,
                              atomRowData.functionalDerivative);
      AtomPlanRealColumn->gather(snap_->atomData.functionalDerivative,
                                 atomColData.functionalDerivative);
    }
#endif
  }
640
641
  /**
   * Collects the post-pair-loop results back onto local storage
   * (MPI only): forces, torques, and skipped charges are scattered
   * from both the row and column buffers and summed into the local
   * snapshot arrays; pairwise and embedding potentials are summed
   * across all processors with Allreduce.
   */
  void ForceMatrixDecomposition::collectData() {
    snap_ = sman_->getCurrentSnapshot();
    storageLayout_ = sman_->getStorageLayout();
#ifdef IS_MPI
    int n = snap_->atomData.force.size();
    vector<Vector3d> frc_tmp(n, V3Zero);

    AtomPlanVectorRow->scatter(atomRowData.force, frc_tmp);
    for (int i = 0; i < n; i++) {
      snap_->atomData.force[i] += frc_tmp[i];
      frc_tmp[i] = 0.0;   // re-zero so the buffer can be reused below
    }

    AtomPlanVectorColumn->scatter(atomColData.force, frc_tmp);
    for (int i = 0; i < n; i++) {
      snap_->atomData.force[i] += frc_tmp[i];
    }

    if (storageLayout_ & DataStorage::dslTorque) {

      int nt = snap_->atomData.torque.size();
      vector<Vector3d> trq_tmp(nt, V3Zero);

      AtomPlanVectorRow->scatter(atomRowData.torque, trq_tmp);
      for (int i = 0; i < nt; i++) {
        snap_->atomData.torque[i] += trq_tmp[i];
        trq_tmp[i] = 0.0;
      }

      AtomPlanVectorColumn->scatter(atomColData.torque, trq_tmp);
      for (int i = 0; i < nt; i++)
        snap_->atomData.torque[i] += trq_tmp[i];
    }

    if (storageLayout_ & DataStorage::dslSkippedCharge) {

      int ns = snap_->atomData.skippedCharge.size();
      vector<RealType> skch_tmp(ns, 0.0);

      AtomPlanRealRow->scatter(atomRowData.skippedCharge, skch_tmp);
      for (int i = 0; i < ns; i++) {
        snap_->atomData.skippedCharge[i] += skch_tmp[i];
        skch_tmp[i] = 0.0;
      }

      AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp);
      for (int i = 0; i < ns; i++)
        snap_->atomData.skippedCharge[i] += skch_tmp[i];

    }

    nLocal_ = snap_->getNumberOfAtoms();

    vector<potVec> pot_temp(nLocal_,
                            Vector<RealType, N_INTERACTION_FAMILIES> (0.0));

    // scatter/gather pot_row into the members of my column

    AtomPlanPotRow->scatter(pot_row, pot_temp);

    for (int ii = 0; ii < pot_temp.size(); ii++ )
      pairwisePot += pot_temp[ii];

    fill(pot_temp.begin(), pot_temp.end(),
         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));

    AtomPlanPotColumn->scatter(pot_col, pot_temp);

    for (int ii = 0; ii < pot_temp.size(); ii++ )
      pairwisePot += pot_temp[ii];

    // sum the per-family pairwise potential over all processors:
    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
      RealType ploc1 = pairwisePot[ii];
      RealType ploc2 = 0.0;
      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
      pairwisePot[ii] = ploc2;
    }

    // ... and the per-family embedding potential:
    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
      RealType ploc1 = embeddingPot[ii];
      RealType ploc2 = 0.0;
      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
      embeddingPot[ii] = ploc2;
    }

#endif

  }
730
  /**
   * Returns the number of atoms in this processor's row block
   * (all local atoms in a serial run).
   */
  int ForceMatrixDecomposition::getNAtomsInRow() {
#ifdef IS_MPI
    return nAtomsInRow_;
#else
    return nLocal_;
#endif
  }
738
  /**
   * returns the list of atoms belonging to this group.
   *
   * @param cg1 row-indexed (or, serially, local) cutoff group index
   * @return row-indexed (or local) atom indices in the group
   */
  vector<int> ForceMatrixDecomposition::getAtomsInGroupRow(int cg1){
#ifdef IS_MPI
    return groupListRow_[cg1];
#else
    return groupList_[cg1];
#endif
  }
749
  /**
   * Returns the list of atoms belonging to a column-indexed cutoff
   * group (or, serially, the same local group list as the row form).
   */
  vector<int> ForceMatrixDecomposition::getAtomsInGroupColumn(int cg2){
#ifdef IS_MPI
    return groupListCol_[cg2];
#else
    return groupList_[cg2];
#endif
  }
757
  /**
   * Returns the minimum-image vector from cutoff group cg1 to cutoff
   * group cg2 (cg2 - cg1, wrapped by the periodic box).
   */
  Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1, int cg2){
    Vector3d d;

#ifdef IS_MPI
    d = cgColData.position[cg2] - cgRowData.position[cg1];
#else
    d = snap_->cgData.position[cg2] - snap_->cgData.position[cg1];
#endif

    snap_->wrapVector(d);
    return d;
  }
770
771
  /**
   * Returns the minimum-image vector from a row-indexed atom to its
   * cutoff group's position (group - atom, wrapped).
   */
  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){

    Vector3d d;

#ifdef IS_MPI
    d = cgRowData.position[cg1] - atomRowData.position[atom1];
#else
    d = snap_->cgData.position[cg1] - snap_->atomData.position[atom1];
#endif

    snap_->wrapVector(d);
    return d;
  }
785
  /**
   * Returns the minimum-image vector from a column-indexed atom to
   * its cutoff group's position (group - atom, wrapped).
   */
  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2, int cg2){
    Vector3d d;

#ifdef IS_MPI
    d = cgColData.position[cg2] - atomColData.position[atom2];
#else
    d = snap_->cgData.position[cg2] - snap_->atomData.position[atom2];
#endif

    snap_->wrapVector(d);
    return d;
  }
798
  /**
   * Returns the mass factor for a row-indexed atom (local index
   * serially).
   */
  RealType ForceMatrixDecomposition::getMassFactorRow(int atom1) {
#ifdef IS_MPI
    return massFactorsRow[atom1];
#else
    return massFactors[atom1];
#endif
  }
806
  /**
   * Returns the mass factor for a column-indexed atom (local index
   * serially).
   */
  RealType ForceMatrixDecomposition::getMassFactorColumn(int atom2) {
#ifdef IS_MPI
    return massFactorsCol[atom2];
#else
    return massFactors[atom2];
#endif

  }
815
  /**
   * Returns the minimum-image vector from atom1 (row-indexed) to
   * atom2 (column-indexed); both are local indices serially.
   */
  Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1, int atom2){
    Vector3d d;

#ifdef IS_MPI
    d = atomColData.position[atom2] - atomRowData.position[atom1];
#else
    d = snap_->atomData.position[atom2] - snap_->atomData.position[atom1];
#endif

    snap_->wrapVector(d);
    return d;
  }
828
  /**
   * Returns the precomputed list of excluded partners for atom1
   * (column indices under MPI, local indices serially — see
   * distributeInitialData, where the lists are built).
   */
  vector<int> ForceMatrixDecomposition::getExcludesForAtom(int atom1) {
    return excludesForAtom[atom1];
  }
832
833 /**
834 * We need to exclude some overcounted interactions that result from
835 * the parallel decomposition.
836 */
837 bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) {
838 int unique_id_1, unique_id_2;
839
840 #ifdef IS_MPI
841 // in MPI, we have to look up the unique IDs for each atom
842 unique_id_1 = AtomRowToGlobal[atom1];
843 unique_id_2 = AtomColToGlobal[atom2];
844 #else
845 unique_id_1 = AtomLocalToGlobal[atom1];
846 unique_id_2 = AtomLocalToGlobal[atom2];
847 #endif
848
849 if (unique_id_1 == unique_id_2) return true;
850
851 #ifdef IS_MPI
852 // this prevents us from doing the pair on multiple processors
853 if (unique_id_1 < unique_id_2) {
854 if ((unique_id_1 + unique_id_2) % 2 == 0) return true;
855 } else {
856 if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
857 }
858 #endif
859
860 return false;
861 }
862
863 /**
864 * We need to handle the interactions for atoms who are involved in
865 * the same rigid body as well as some short range interactions
866 * (bonds, bends, torsions) differently from other interactions.
867 * We'll still visit the pairwise routines, but with a flag that
868 * tells those routines to exclude the pair from direct long range
869 * interactions. Some indirect interactions (notably reaction
870 * field) must still be handled for these pairs.
871 */
872 bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) {
873
874 // excludesForAtom was constructed to use row/column indices in the MPI
875 // version, and to use local IDs in the non-MPI version:
876
877 for (vector<int>::iterator i = excludesForAtom[atom1].begin();
878 i != excludesForAtom[atom1].end(); ++i) {
879 if ( (*i) == atom2 ) return true;
880 }
881
882 return false;
883 }
884
885
  /**
   * Accumulates a force contribution onto a row-indexed atom
   * (local snapshot storage serially).
   */
  void ForceMatrixDecomposition::addForceToAtomRow(int atom1, Vector3d fg){
#ifdef IS_MPI
    atomRowData.force[atom1] += fg;
#else
    snap_->atomData.force[atom1] += fg;
#endif
  }
893
  /**
   * Accumulates a force contribution onto a column-indexed atom
   * (local snapshot storage serially).
   */
  void ForceMatrixDecomposition::addForceToAtomColumn(int atom2, Vector3d fg){
#ifdef IS_MPI
    atomColData.force[atom2] += fg;
#else
    snap_->atomData.force[atom2] += fg;
#endif
  }
901
  // filling interaction blocks with pointers
  /**
   * Populates an InteractionData packet for the (atom1, atom2) pair:
   * the pair's atom types, its excluded flag, and — for every array
   * enabled in storageLayout_ — pointers into the row/column buffers
   * (MPI) or the local snapshot arrays (serial).  Pointers are used so
   * the pairwise routines can both read and accumulate in place.
   */
  void ForceMatrixDecomposition::fillInteractionData(InteractionData &idat,
                                                     int atom1, int atom2) {

    idat.excluded = excludeAtomPair(atom1, atom2);

#ifdef IS_MPI
    // atypesRow/Col were pre-resolved from idents in
    // distributeInitialData, avoiding a force-field lookup per pair:
    idat.atypes = make_pair( atypesRow[atom1], atypesCol[atom2]);
    //idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]),
    //                         ff_->getAtomType(identsCol[atom2]) );

    if (storageLayout_ & DataStorage::dslAmat) {
      idat.A1 = &(atomRowData.aMat[atom1]);
      idat.A2 = &(atomColData.aMat[atom2]);
    }

    if (storageLayout_ & DataStorage::dslElectroFrame) {
      idat.eFrame1 = &(atomRowData.electroFrame[atom1]);
      idat.eFrame2 = &(atomColData.electroFrame[atom2]);
    }

    if (storageLayout_ & DataStorage::dslTorque) {
      idat.t1 = &(atomRowData.torque[atom1]);
      idat.t2 = &(atomColData.torque[atom2]);
    }

    if (storageLayout_ & DataStorage::dslDensity) {
      idat.rho1 = &(atomRowData.density[atom1]);
      idat.rho2 = &(atomColData.density[atom2]);
    }

    if (storageLayout_ & DataStorage::dslFunctional) {
      idat.frho1 = &(atomRowData.functional[atom1]);
      idat.frho2 = &(atomColData.functional[atom2]);
    }

    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
      idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
      idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
    }

    if (storageLayout_ & DataStorage::dslParticlePot) {
      idat.particlePot1 = &(atomRowData.particlePot[atom1]);
      idat.particlePot2 = &(atomColData.particlePot[atom2]);
    }

    if (storageLayout_ & DataStorage::dslSkippedCharge) {
      idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
      idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
    }

#else

    // serial version: identical wiring, but both atoms index the
    // local snapshot arrays directly.
    // cerr << "atoms = " << atom1 << " " << atom2 << "\n";
    // cerr << "pos1 = " << snap_->atomData.position[atom1] << "\n";
    // cerr << "pos2 = " << snap_->atomData.position[atom2] << "\n";

    idat.atypes = make_pair( atypesLocal[atom1], atypesLocal[atom2]);
    //idat.atypes = make_pair( ff_->getAtomType(idents[atom1]),
    //                         ff_->getAtomType(idents[atom2]) );

    if (storageLayout_ & DataStorage::dslAmat) {
      idat.A1 = &(snap_->atomData.aMat[atom1]);
      idat.A2 = &(snap_->atomData.aMat[atom2]);
    }

    if (storageLayout_ & DataStorage::dslElectroFrame) {
      idat.eFrame1 = &(snap_->atomData.electroFrame[atom1]);
      idat.eFrame2 = &(snap_->atomData.electroFrame[atom2]);
    }

    if (storageLayout_ & DataStorage::dslTorque) {
      idat.t1 = &(snap_->atomData.torque[atom1]);
      idat.t2 = &(snap_->atomData.torque[atom2]);
    }

    if (storageLayout_ & DataStorage::dslDensity) {
      idat.rho1 = &(snap_->atomData.density[atom1]);
      idat.rho2 = &(snap_->atomData.density[atom2]);
    }

    if (storageLayout_ & DataStorage::dslFunctional) {
      idat.frho1 = &(snap_->atomData.functional[atom1]);
      idat.frho2 = &(snap_->atomData.functional[atom2]);
    }

    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
      idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
      idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
    }

    if (storageLayout_ & DataStorage::dslParticlePot) {
      idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
      idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
    }

    if (storageLayout_ & DataStorage::dslSkippedCharge) {
      idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
      idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
    }
#endif
  }
1005
1006
1007 void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) {
1008 #ifdef IS_MPI
1009 pot_row[atom1] += RealType(0.5) * *(idat.pot);
1010 pot_col[atom2] += RealType(0.5) * *(idat.pot);
1011
1012 atomRowData.force[atom1] += *(idat.f1);
1013 atomColData.force[atom2] -= *(idat.f1);
1014 #else
1015 pairwisePot += *(idat.pot);
1016
1017 snap_->atomData.force[atom1] += *(idat.f1);
1018 snap_->atomData.force[atom2] -= *(idat.f1);
1019 #endif
1020
1021 }
1022
1023 /*
1024 * buildNeighborList
1025 *
1026 * first element of pair is row-indexed CutoffGroup
1027 * second element of pair is column-indexed CutoffGroup
1028 */
1029 vector<pair<int, int> > ForceMatrixDecomposition::buildNeighborList() {
1030
1031 vector<pair<int, int> > neighborList;
1032 groupCutoffs cuts;
1033 bool doAllPairs = false;
1034
1035 #ifdef IS_MPI
1036 cellListRow_.clear();
1037 cellListCol_.clear();
1038 #else
1039 cellList_.clear();
1040 #endif
1041
1042 RealType rList_ = (largestRcut_ + skinThickness_);
1043 RealType rl2 = rList_ * rList_;
1044 Snapshot* snap_ = sman_->getCurrentSnapshot();
1045 Mat3x3d Hmat = snap_->getHmat();
1046 Vector3d Hx = Hmat.getColumn(0);
1047 Vector3d Hy = Hmat.getColumn(1);
1048 Vector3d Hz = Hmat.getColumn(2);
1049
1050 nCells_.x() = (int) ( Hx.length() )/ rList_;
1051 nCells_.y() = (int) ( Hy.length() )/ rList_;
1052 nCells_.z() = (int) ( Hz.length() )/ rList_;
1053
1054 // handle small boxes where the cell offsets can end up repeating cells
1055
1056 if (nCells_.x() < 3) doAllPairs = true;
1057 if (nCells_.y() < 3) doAllPairs = true;
1058 if (nCells_.z() < 3) doAllPairs = true;
1059
1060 Mat3x3d invHmat = snap_->getInvHmat();
1061 Vector3d rs, scaled, dr;
1062 Vector3i whichCell;
1063 int cellIndex;
1064 int nCtot = nCells_.x() * nCells_.y() * nCells_.z();
1065
1066 #ifdef IS_MPI
1067 cellListRow_.resize(nCtot);
1068 cellListCol_.resize(nCtot);
1069 #else
1070 cellList_.resize(nCtot);
1071 #endif
1072
1073 if (!doAllPairs) {
1074 #ifdef IS_MPI
1075
1076 for (int i = 0; i < nGroupsInRow_; i++) {
1077 rs = cgRowData.position[i];
1078
1079 // scaled positions relative to the box vectors
1080 scaled = invHmat * rs;
1081
1082 // wrap the vector back into the unit box by subtracting integer box
1083 // numbers
1084 for (int j = 0; j < 3; j++) {
1085 scaled[j] -= roundMe(scaled[j]);
1086 scaled[j] += 0.5;
1087 }
1088
1089 // find xyz-indices of cell that cutoffGroup is in.
1090 whichCell.x() = nCells_.x() * scaled.x();
1091 whichCell.y() = nCells_.y() * scaled.y();
1092 whichCell.z() = nCells_.z() * scaled.z();
1093
1094 // find single index of this cell:
1095 cellIndex = Vlinear(whichCell, nCells_);
1096
1097 // add this cutoff group to the list of groups in this cell;
1098 cellListRow_[cellIndex].push_back(i);
1099 }
1100 for (int i = 0; i < nGroupsInCol_; i++) {
1101 rs = cgColData.position[i];
1102
1103 // scaled positions relative to the box vectors
1104 scaled = invHmat * rs;
1105
1106 // wrap the vector back into the unit box by subtracting integer box
1107 // numbers
1108 for (int j = 0; j < 3; j++) {
1109 scaled[j] -= roundMe(scaled[j]);
1110 scaled[j] += 0.5;
1111 }
1112
1113 // find xyz-indices of cell that cutoffGroup is in.
1114 whichCell.x() = nCells_.x() * scaled.x();
1115 whichCell.y() = nCells_.y() * scaled.y();
1116 whichCell.z() = nCells_.z() * scaled.z();
1117
1118 // find single index of this cell:
1119 cellIndex = Vlinear(whichCell, nCells_);
1120
1121 // add this cutoff group to the list of groups in this cell;
1122 cellListCol_[cellIndex].push_back(i);
1123 }
1124
1125 #else
1126 for (int i = 0; i < nGroups_; i++) {
1127 rs = snap_->cgData.position[i];
1128
1129 // scaled positions relative to the box vectors
1130 scaled = invHmat * rs;
1131
1132 // wrap the vector back into the unit box by subtracting integer box
1133 // numbers
1134 for (int j = 0; j < 3; j++) {
1135 scaled[j] -= roundMe(scaled[j]);
1136 scaled[j] += 0.5;
1137 }
1138
1139 // find xyz-indices of cell that cutoffGroup is in.
1140 whichCell.x() = nCells_.x() * scaled.x();
1141 whichCell.y() = nCells_.y() * scaled.y();
1142 whichCell.z() = nCells_.z() * scaled.z();
1143
1144 // find single index of this cell:
1145 cellIndex = Vlinear(whichCell, nCells_);
1146
1147 // add this cutoff group to the list of groups in this cell;
1148 cellList_[cellIndex].push_back(i);
1149 }
1150
1151 #endif
1152
1153 for (int m1z = 0; m1z < nCells_.z(); m1z++) {
1154 for (int m1y = 0; m1y < nCells_.y(); m1y++) {
1155 for (int m1x = 0; m1x < nCells_.x(); m1x++) {
1156 Vector3i m1v(m1x, m1y, m1z);
1157 int m1 = Vlinear(m1v, nCells_);
1158
1159 for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1160 os != cellOffsets_.end(); ++os) {
1161
1162 Vector3i m2v = m1v + (*os);
1163
1164
1165 if (m2v.x() >= nCells_.x()) {
1166 m2v.x() = 0;
1167 } else if (m2v.x() < 0) {
1168 m2v.x() = nCells_.x() - 1;
1169 }
1170
1171 if (m2v.y() >= nCells_.y()) {
1172 m2v.y() = 0;
1173 } else if (m2v.y() < 0) {
1174 m2v.y() = nCells_.y() - 1;
1175 }
1176
1177 if (m2v.z() >= nCells_.z()) {
1178 m2v.z() = 0;
1179 } else if (m2v.z() < 0) {
1180 m2v.z() = nCells_.z() - 1;
1181 }
1182
1183 int m2 = Vlinear (m2v, nCells_);
1184
1185 #ifdef IS_MPI
1186 for (vector<int>::iterator j1 = cellListRow_[m1].begin();
1187 j1 != cellListRow_[m1].end(); ++j1) {
1188 for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1189 j2 != cellListCol_[m2].end(); ++j2) {
1190
1191 // In parallel, we need to visit *all* pairs of row
1192 // & column indicies and will divide labor in the
1193 // force evaluation later.
1194 dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)];
1195 snap_->wrapVector(dr);
1196 cuts = getGroupCutoffs( (*j1), (*j2) );
1197 if (dr.lengthSquare() < cuts.third) {
1198 neighborList.push_back(make_pair((*j1), (*j2)));
1199 }
1200 }
1201 }
1202 #else
1203 for (vector<int>::iterator j1 = cellList_[m1].begin();
1204 j1 != cellList_[m1].end(); ++j1) {
1205 for (vector<int>::iterator j2 = cellList_[m2].begin();
1206 j2 != cellList_[m2].end(); ++j2) {
1207
1208 // Always do this if we're in different cells or if
1209 // we're in the same cell and the global index of
1210 // the j2 cutoff group is greater than or equal to
1211 // the j1 cutoff group. Note that Rappaport's code
1212 // has a "less than" conditional here, but that
1213 // deals with atom-by-atom computation. OpenMD
1214 // allows atoms within a single cutoff group to
1215 // interact with each other.
1216
1217
1218
1219 if (m2 != m1 || (*j2) >= (*j1) ) {
1220
1221 dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
1222 snap_->wrapVector(dr);
1223 cuts = getGroupCutoffs( (*j1), (*j2) );
1224 if (dr.lengthSquare() < cuts.third) {
1225 neighborList.push_back(make_pair((*j1), (*j2)));
1226 }
1227 }
1228 }
1229 }
1230 #endif
1231 }
1232 }
1233 }
1234 }
1235 } else {
1236 // branch to do all cutoff group pairs
1237 #ifdef IS_MPI
1238 for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1239 for (int j2 = 0; j2 < nGroupsInCol_; j2++) {
1240 dr = cgColData.position[j2] - cgRowData.position[j1];
1241 snap_->wrapVector(dr);
1242 cuts = getGroupCutoffs( j1, j2 );
1243 if (dr.lengthSquare() < cuts.third) {
1244 neighborList.push_back(make_pair(j1, j2));
1245 }
1246 }
1247 }
1248 #else
1249 // include all groups here.
1250 for (int j1 = 0; j1 < nGroups_; j1++) {
1251 // include self group interactions j2 == j1
1252 for (int j2 = j1; j2 < nGroups_; j2++) {
1253 dr = snap_->cgData.position[j2] - snap_->cgData.position[j1];
1254 snap_->wrapVector(dr);
1255 cuts = getGroupCutoffs( j1, j2 );
1256 if (dr.lengthSquare() < cuts.third) {
1257 neighborList.push_back(make_pair(j1, j2));
1258 }
1259 }
1260 }
1261 #endif
1262 }
1263
1264 // save the local cutoff group positions for the check that is
1265 // done on each loop:
1266 saved_CG_positions_.clear();
1267 for (int i = 0; i < nGroups_; i++)
1268 saved_CG_positions_.push_back(snap_->cgData.position[i]);
1269
1270 return neighborList;
1271 }
1272 } //end namespace OpenMD