root/OpenMD/trunk/src/parallel/ForceMatrixDecomposition.cpp
Revision: 1721
Committed: Thu May 24 14:17:42 2012 UTC by gezelter
Original Path: branches/development/src/parallel/ForceMatrixDecomposition.cpp
File size: 46251 byte(s)
Log Message:
Fixing some bugs

1 /*
2 * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
3 *
4 * The University of Notre Dame grants you ("Licensee") a
5 * non-exclusive, royalty free, license to use, modify and
6 * redistribute this software in source and binary code form, provided
7 * that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the
15 * distribution.
16 *
17 * This software is provided "AS IS," without a warranty of any
18 * kind. All express or implied conditions, representations and
19 * warranties, including any implied warranty of merchantability,
20 * fitness for a particular purpose or non-infringement, are hereby
21 * excluded. The University of Notre Dame and its licensors shall not
22 * be liable for any damages suffered by licensee as a result of
23 * using, modifying or distributing the software or its
24 * derivatives. In no event will the University of Notre Dame or its
25 * licensors be liable for any lost revenue, profit or data, or for
26 * direct, indirect, special, consequential, incidental or punitive
27 * damages, however caused and regardless of the theory of liability,
28 * arising out of the use of or inability to use software, even if the
29 * University of Notre Dame has been advised of the possibility of
30 * such damages.
31 *
32 * SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
33 * research, please cite the appropriate papers when you publish your
34 * work. Good starting points are:
35 *
36 * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
37 * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
38 * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
39 * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010).
40 * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
41 */
42 #include "parallel/ForceMatrixDecomposition.hpp"
43 #include "math/SquareMatrix3.hpp"
44 #include "nonbonded/NonBondedInteraction.hpp"
45 #include "brains/SnapshotManager.hpp"
46 #include "brains/PairList.hpp"
47
48 using namespace std;
49 namespace OpenMD {
50
51 ForceMatrixDecomposition::ForceMatrixDecomposition(SimInfo* info, InteractionManager* iMan) : ForceDecomposition(info, iMan) {
52
53 // In a parallel computation, row and column scans must visit all
54 // surrounding cells (not just the 14 upper triangular blocks that
55 // are used when the processor can see all pairs)
56 #ifdef IS_MPI
57 cellOffsets_.clear();
58 cellOffsets_.push_back( Vector3i(-1,-1,-1) );
59 cellOffsets_.push_back( Vector3i( 0,-1,-1) );
60 cellOffsets_.push_back( Vector3i( 1,-1,-1) );
61 cellOffsets_.push_back( Vector3i(-1, 0,-1) );
62 cellOffsets_.push_back( Vector3i( 0, 0,-1) );
63 cellOffsets_.push_back( Vector3i( 1, 0,-1) );
64 cellOffsets_.push_back( Vector3i(-1, 1,-1) );
65 cellOffsets_.push_back( Vector3i( 0, 1,-1) );
66 cellOffsets_.push_back( Vector3i( 1, 1,-1) );
67 cellOffsets_.push_back( Vector3i(-1,-1, 0) );
68 cellOffsets_.push_back( Vector3i( 0,-1, 0) );
69 cellOffsets_.push_back( Vector3i( 1,-1, 0) );
70 cellOffsets_.push_back( Vector3i(-1, 0, 0) );
71 cellOffsets_.push_back( Vector3i( 0, 0, 0) );
72 cellOffsets_.push_back( Vector3i( 1, 0, 0) );
73 cellOffsets_.push_back( Vector3i(-1, 1, 0) );
74 cellOffsets_.push_back( Vector3i( 0, 1, 0) );
75 cellOffsets_.push_back( Vector3i( 1, 1, 0) );
76 cellOffsets_.push_back( Vector3i(-1,-1, 1) );
77 cellOffsets_.push_back( Vector3i( 0,-1, 1) );
78 cellOffsets_.push_back( Vector3i( 1,-1, 1) );
79 cellOffsets_.push_back( Vector3i(-1, 0, 1) );
80 cellOffsets_.push_back( Vector3i( 0, 0, 1) );
81 cellOffsets_.push_back( Vector3i( 1, 0, 1) );
82 cellOffsets_.push_back( Vector3i(-1, 1, 1) );
83 cellOffsets_.push_back( Vector3i( 0, 1, 1) );
84 cellOffsets_.push_back( Vector3i( 1, 1, 1) );
85 #endif
86 }
87
88
89 /**
90 * distributeInitialData is essentially a copy of the older Fortran
91 * SimulationSetup
92 */
93 void ForceMatrixDecomposition::distributeInitialData() {
94 snap_ = sman_->getCurrentSnapshot();
95 storageLayout_ = sman_->getStorageLayout();
96 ff_ = info_->getForceField();
97 nLocal_ = snap_->getNumberOfAtoms();
98
99 nGroups_ = info_->getNLocalCutoffGroups();
100 // gather the information for atomtype IDs (atids):
101 idents = info_->getIdentArray();
102 AtomLocalToGlobal = info_->getGlobalAtomIndices();
103 cgLocalToGlobal = info_->getGlobalGroupIndices();
104 vector<int> globalGroupMembership = info_->getGlobalGroupMembership();
105
106 massFactors = info_->getMassFactors();
107
108 PairList* excludes = info_->getExcludedInteractions();
109 PairList* oneTwo = info_->getOneTwoInteractions();
110 PairList* oneThree = info_->getOneThreeInteractions();
111 PairList* oneFour = info_->getOneFourInteractions();
112
113 #ifdef IS_MPI
114
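// The Plan objects manage the gather/scatter communication along the
// row and column communicators for each data type used in the force
// matrix decomposition.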
115 MPI::Intracomm row = rowComm.getComm();
116 MPI::Intracomm col = colComm.getComm();
117
118 AtomPlanIntRow = new Plan<int>(row, nLocal_);
119 AtomPlanRealRow = new Plan<RealType>(row, nLocal_);
120 AtomPlanVectorRow = new Plan<Vector3d>(row, nLocal_);
121 AtomPlanMatrixRow = new Plan<Mat3x3d>(row, nLocal_);
122 AtomPlanPotRow = new Plan<potVec>(row, nLocal_);
123
124 AtomPlanIntColumn = new Plan<int>(col, nLocal_);
125 AtomPlanRealColumn = new Plan<RealType>(col, nLocal_);
126 AtomPlanVectorColumn = new Plan<Vector3d>(col, nLocal_);
127 AtomPlanMatrixColumn = new Plan<Mat3x3d>(col, nLocal_);
128 AtomPlanPotColumn = new Plan<potVec>(col, nLocal_);
129
130 cgPlanIntRow = new Plan<int>(row, nGroups_);
131 cgPlanVectorRow = new Plan<Vector3d>(row, nGroups_);
132 cgPlanIntColumn = new Plan<int>(col, nGroups_);
133 cgPlanVectorColumn = new Plan<Vector3d>(col, nGroups_);
134
135 nAtomsInRow_ = AtomPlanIntRow->getSize();
136 nAtomsInCol_ = AtomPlanIntColumn->getSize();
137 nGroupsInRow_ = cgPlanIntRow->getSize();
138 nGroupsInCol_ = cgPlanIntColumn->getSize();
139
140 // Modify the data storage objects with the correct layouts and sizes:
141 atomRowData.resize(nAtomsInRow_);
142 atomRowData.setStorageLayout(storageLayout_);
143 atomColData.resize(nAtomsInCol_);
144 atomColData.setStorageLayout(storageLayout_);
145 cgRowData.resize(nGroupsInRow_);
146 cgRowData.setStorageLayout(DataStorage::dslPosition);
147 cgColData.resize(nGroupsInCol_);
148 cgColData.setStorageLayout(DataStorage::dslPosition);
149
150 identsRow.resize(nAtomsInRow_);
151 identsCol.resize(nAtomsInCol_);
152
153 AtomPlanIntRow->gather(idents, identsRow);
154 AtomPlanIntColumn->gather(idents, identsCol);
155
156 // allocate memory for the parallel objects
157 atypesRow.resize(nAtomsInRow_);
158 atypesCol.resize(nAtomsInCol_);
159
160 for (int i = 0; i < nAtomsInRow_; i++)
161 atypesRow[i] = ff_->getAtomType(identsRow[i]);
162 for (int i = 0; i < nAtomsInCol_; i++)
163 atypesCol[i] = ff_->getAtomType(identsCol[i]);
164
165 pot_row.resize(nAtomsInRow_);
166 pot_col.resize(nAtomsInCol_);
167
168 AtomRowToGlobal.resize(nAtomsInRow_);
169 AtomColToGlobal.resize(nAtomsInCol_);
170 AtomPlanIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
171 AtomPlanIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);
172
173 cgRowToGlobal.resize(nGroupsInRow_);
174 cgColToGlobal.resize(nGroupsInCol_);
175 cgPlanIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
176 cgPlanIntColumn->gather(cgLocalToGlobal, cgColToGlobal);
177
178 massFactorsRow.resize(nAtomsInRow_);
179 massFactorsCol.resize(nAtomsInCol_);
180 AtomPlanRealRow->gather(massFactors, massFactorsRow);
181 AtomPlanRealColumn->gather(massFactors, massFactorsCol);
182
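// Build the row- and column-indexed lists of atoms belonging to each
// cutoff group by matching global atom indices against the global
// group membership.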
183 groupListRow_.clear();
184 groupListRow_.resize(nGroupsInRow_);
185 for (int i = 0; i < nGroupsInRow_; i++) {
186 int gid = cgRowToGlobal[i];
187 for (int j = 0; j < nAtomsInRow_; j++) {
188 int aid = AtomRowToGlobal[j];
189 if (globalGroupMembership[aid] == gid)
190 groupListRow_[i].push_back(j);
191 }
192 }
193
194 groupListCol_.clear();
195 groupListCol_.resize(nGroupsInCol_);
196 for (int i = 0; i < nGroupsInCol_; i++) {
197 int gid = cgColToGlobal[i];
198 for (int j = 0; j < nAtomsInCol_; j++) {
199 int aid = AtomColToGlobal[j];
200 if (globalGroupMembership[aid] == gid)
201 groupListCol_[i].push_back(j);
202 }
203 }
204
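// For each row atom, record which column atoms are excluded and which
// are topologically close (1-2, 1-3, or 1-4 neighbors).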
205 excludesForAtom.clear();
206 excludesForAtom.resize(nAtomsInRow_);
207 toposForAtom.clear();
208 toposForAtom.resize(nAtomsInRow_);
209 topoDist.clear();
210 topoDist.resize(nAtomsInRow_);
211 for (int i = 0; i < nAtomsInRow_; i++) {
212 int iglob = AtomRowToGlobal[i];
213
214 for (int j = 0; j < nAtomsInCol_; j++) {
215 int jglob = AtomColToGlobal[j];
216
217 if (excludes->hasPair(iglob, jglob))
218 excludesForAtom[i].push_back(j);
219
220 if (oneTwo->hasPair(iglob, jglob)) {
221 toposForAtom[i].push_back(j);
222 topoDist[i].push_back(1);
223 } else {
224 if (oneThree->hasPair(iglob, jglob)) {
225 toposForAtom[i].push_back(j);
226 topoDist[i].push_back(2);
227 } else {
228 if (oneFour->hasPair(iglob, jglob)) {
229 toposForAtom[i].push_back(j);
230 topoDist[i].push_back(3);
231 }
232 }
233 }
234 }
235 }
236
237 #else
238 excludesForAtom.clear();
239 excludesForAtom.resize(nLocal_);
240 toposForAtom.clear();
241 toposForAtom.resize(nLocal_);
242 topoDist.clear();
243 topoDist.resize(nLocal_);
244
245 for (int i = 0; i < nLocal_; i++) {
246 int iglob = AtomLocalToGlobal[i];
247
248 for (int j = 0; j < nLocal_; j++) {
249 int jglob = AtomLocalToGlobal[j];
250
251 if (excludes->hasPair(iglob, jglob))
252 excludesForAtom[i].push_back(j);
253
254 if (oneTwo->hasPair(iglob, jglob)) {
255 toposForAtom[i].push_back(j);
256 topoDist[i].push_back(1);
257 } else {
258 if (oneThree->hasPair(iglob, jglob)) {
259 toposForAtom[i].push_back(j);
260 topoDist[i].push_back(2);
261 } else {
262 if (oneFour->hasPair(iglob, jglob)) {
263 toposForAtom[i].push_back(j);
264 topoDist[i].push_back(3);
265 }
266 }
267 }
268 }
269 }
270 #endif
271
272 // allocate memory for the parallel objects
273 atypesLocal.resize(nLocal_);
274
275 for (int i = 0; i < nLocal_; i++)
276 atypesLocal[i] = ff_->getAtomType(idents[i]);
277
278 groupList_.clear();
279 groupList_.resize(nGroups_);
280 for (int i = 0; i < nGroups_; i++) {
281 int gid = cgLocalToGlobal[i];
282 for (int j = 0; j < nLocal_; j++) {
283 int aid = AtomLocalToGlobal[j];
284 if (globalGroupMembership[aid] == gid) {
285 groupList_[i].push_back(j);
286 }
287 }
288 }
289
290
291 createGtypeCutoffMap();
292
293 }
294
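/**
 * Builds the map from pairs of group cutoff types to the group cutoff
 * radius, its square, and the (cutoff + skin)^2 value used when
 * building the neighbor list.
 */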
295 void ForceMatrixDecomposition::createGtypeCutoffMap() {
296
297 RealType tol = 1e-6;
298 largestRcut_ = 0.0;
299 RealType rc;
300 int atid;
301 set<AtomType*> atypes = info_->getSimulatedAtomTypes();
302
303 map<int, RealType> atypeCutoff;
304
305 for (set<AtomType*>::iterator at = atypes.begin();
306 at != atypes.end(); ++at){
307 atid = (*at)->getIdent();
308 if (userChoseCutoff_)
309 atypeCutoff[atid] = userCutoff_;
310 else
311 atypeCutoff[atid] = interactionMan_->getSuggestedCutoffRadius(*at);
312 }
313
314 vector<RealType> gTypeCutoffs;
315 // first we do a single loop over the cutoff groups to find the
316 // largest cutoff for any atypes present in this group.
317 #ifdef IS_MPI
318 vector<RealType> groupCutoffRow(nGroupsInRow_, 0.0);
319 groupRowToGtype.resize(nGroupsInRow_);
320 for (int cg1 = 0; cg1 < nGroupsInRow_; cg1++) {
321 vector<int> atomListRow = getAtomsInGroupRow(cg1);
322 for (vector<int>::iterator ia = atomListRow.begin();
323 ia != atomListRow.end(); ++ia) {
324 int atom1 = (*ia);
325 atid = identsRow[atom1];
326 if (atypeCutoff[atid] > groupCutoffRow[cg1]) {
327 groupCutoffRow[cg1] = atypeCutoff[atid];
328 }
329 }
330
331 bool gTypeFound = false;
332 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
333 if (abs(groupCutoffRow[cg1] - gTypeCutoffs[gt]) < tol) {
334 groupRowToGtype[cg1] = gt;
335 gTypeFound = true;
336 }
337 }
338 if (!gTypeFound) {
339 gTypeCutoffs.push_back( groupCutoffRow[cg1] );
340 groupRowToGtype[cg1] = gTypeCutoffs.size() - 1;
341 }
342
343 }
344 vector<RealType> groupCutoffCol(nGroupsInCol_, 0.0);
345 groupColToGtype.resize(nGroupsInCol_);
346 for (int cg2 = 0; cg2 < nGroupsInCol_; cg2++) {
347 vector<int> atomListCol = getAtomsInGroupColumn(cg2);
348 for (vector<int>::iterator jb = atomListCol.begin();
349 jb != atomListCol.end(); ++jb) {
350 int atom2 = (*jb);
351 atid = identsCol[atom2];
352 if (atypeCutoff[atid] > groupCutoffCol[cg2]) {
353 groupCutoffCol[cg2] = atypeCutoff[atid];
354 }
355 }
356 bool gTypeFound = false;
357 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
358 if (abs(groupCutoffCol[cg2] - gTypeCutoffs[gt]) < tol) {
359 groupColToGtype[cg2] = gt;
360 gTypeFound = true;
361 }
362 }
363 if (!gTypeFound) {
364 gTypeCutoffs.push_back( groupCutoffCol[cg2] );
365 groupColToGtype[cg2] = gTypeCutoffs.size() - 1;
366 }
367 }
368 #else
369
370 vector<RealType> groupCutoff(nGroups_, 0.0);
371 groupToGtype.resize(nGroups_);
372 for (int cg1 = 0; cg1 < nGroups_; cg1++) {
373 groupCutoff[cg1] = 0.0;
374 vector<int> atomList = getAtomsInGroupRow(cg1);
375 for (vector<int>::iterator ia = atomList.begin();
376 ia != atomList.end(); ++ia) {
377 int atom1 = (*ia);
378 atid = idents[atom1];
379 if (atypeCutoff[atid] > groupCutoff[cg1])
380 groupCutoff[cg1] = atypeCutoff[atid];
381 }
382
383 bool gTypeFound = false;
384 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
385 if (abs(groupCutoff[cg1] - gTypeCutoffs[gt]) < tol) {
386 groupToGtype[cg1] = gt;
387 gTypeFound = true;
388 }
389 }
390 if (!gTypeFound) {
391 gTypeCutoffs.push_back( groupCutoff[cg1] );
392 groupToGtype[cg1] = gTypeCutoffs.size() - 1;
393 }
394 }
395 #endif
396
397 // Now we find the maximum group cutoff value present in the simulation
398
399 RealType groupMax = *max_element(gTypeCutoffs.begin(),
400 gTypeCutoffs.end());
401
402 #ifdef IS_MPI
403 MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &groupMax, 1, MPI::REALTYPE,
404 MPI::MAX);
405 #endif
406
407 RealType tradRcut = groupMax;
408
409 for (int i = 0; i < gTypeCutoffs.size(); i++) {
410 for (int j = 0; j < gTypeCutoffs.size(); j++) {
411 RealType thisRcut;
412 switch(cutoffPolicy_) {
413 case TRADITIONAL:
414 thisRcut = tradRcut;
415 break;
416 case MIX:
417 thisRcut = 0.5 * (gTypeCutoffs[i] + gTypeCutoffs[j]);
418 break;
419 case MAX:
420 thisRcut = max(gTypeCutoffs[i], gTypeCutoffs[j]);
421 break;
422 default:
423 sprintf(painCave.errMsg,
424 "ForceMatrixDecomposition::createGtypeCutoffMap "
425 "hit an unknown cutoff policy!\n");
426 painCave.severity = OPENMD_ERROR;
427 painCave.isFatal = 1;
428 simError();
429 break;
430 }
431
432 pair<int,int> key = make_pair(i,j);
433 gTypeCutoffMap[key].first = thisRcut;
434 if (thisRcut > largestRcut_) largestRcut_ = thisRcut;
435 gTypeCutoffMap[key].second = thisRcut*thisRcut;
436 gTypeCutoffMap[key].third = pow(thisRcut + skinThickness_, 2);
437 // sanity check
438
439 if (userChoseCutoff_) {
440 if (abs(gTypeCutoffMap[key].first - userCutoff_) > 0.0001) {
441 sprintf(painCave.errMsg,
442 "ForceMatrixDecomposition::createGtypeCutoffMap "
443 "user-specified rCut (%lf) does not match computed group Cutoff\n", userCutoff_);
444 painCave.severity = OPENMD_ERROR;
445 painCave.isFatal = 1;
446 simError();
447 }
448 }
449 }
450 }
451 }
452
453
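/**
 * Looks up the cutoff triple (rcut, rcut^2, (rcut + skin)^2) for a
 * pair of cutoff groups via their group cutoff types.
 */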
454 groupCutoffs ForceMatrixDecomposition::getGroupCutoffs(int cg1, int cg2) {
455 int i, j;
456 #ifdef IS_MPI
457 i = groupRowToGtype[cg1];
458 j = groupColToGtype[cg2];
459 #else
460 i = groupToGtype[cg1];
461 j = groupToGtype[cg2];
462 #endif
463 return gTypeCutoffMap[make_pair(i,j)];
464 }
465
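/**
 * Returns the topological distance between two atoms (1 for 1-2,
 * 2 for 1-3, 3 for 1-4 pairs), or 0 if they are not topologically
 * connected within four bonds.
 */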
466 int ForceMatrixDecomposition::getTopologicalDistance(int atom1, int atom2) {
467 for (int j = 0; j < toposForAtom[atom1].size(); j++) {
468 if (toposForAtom[atom1][j] == atom2)
469 return topoDist[atom1][j];
470 }
471 return 0;
472 }
473
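/**
 * Zeroes the pairwise and embedding potentials and the per-atom
 * accumulators (forces, torques, densities, etc.) in local storage
 * and, in parallel, in the row- and column-indexed work arrays.
 */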
474 void ForceMatrixDecomposition::zeroWorkArrays() {
475 pairwisePot = 0.0;
476 embeddingPot = 0.0;
477
478 #ifdef IS_MPI
479 if (storageLayout_ & DataStorage::dslForce) {
480 fill(atomRowData.force.begin(), atomRowData.force.end(), V3Zero);
481 fill(atomColData.force.begin(), atomColData.force.end(), V3Zero);
482 }
483
484 if (storageLayout_ & DataStorage::dslTorque) {
485 fill(atomRowData.torque.begin(), atomRowData.torque.end(), V3Zero);
486 fill(atomColData.torque.begin(), atomColData.torque.end(), V3Zero);
487 }
488
489 fill(pot_row.begin(), pot_row.end(),
490 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
491
492 fill(pot_col.begin(), pot_col.end(),
493 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
494
495 if (storageLayout_ & DataStorage::dslParticlePot) {
496 fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(),
497 0.0);
498 fill(atomColData.particlePot.begin(), atomColData.particlePot.end(),
499 0.0);
500 }
501
502 if (storageLayout_ & DataStorage::dslDensity) {
503 fill(atomRowData.density.begin(), atomRowData.density.end(), 0.0);
504 fill(atomColData.density.begin(), atomColData.density.end(), 0.0);
505 }
506
507 if (storageLayout_ & DataStorage::dslFunctional) {
508 fill(atomRowData.functional.begin(), atomRowData.functional.end(),
509 0.0);
510 fill(atomColData.functional.begin(), atomColData.functional.end(),
511 0.0);
512 }
513
514 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
515 fill(atomRowData.functionalDerivative.begin(),
516 atomRowData.functionalDerivative.end(), 0.0);
517 fill(atomColData.functionalDerivative.begin(),
518 atomColData.functionalDerivative.end(), 0.0);
519 }
520
521 if (storageLayout_ & DataStorage::dslSkippedCharge) {
522 fill(atomRowData.skippedCharge.begin(),
523 atomRowData.skippedCharge.end(), 0.0);
524 fill(atomColData.skippedCharge.begin(),
525 atomColData.skippedCharge.end(), 0.0);
526 }
527
528 if (storageLayout_ & DataStorage::dslFlucQForce) {
529 fill(atomRowData.flucQFrc.begin(),
530 atomRowData.flucQFrc.end(), 0.0);
531 fill(atomColData.flucQFrc.begin(),
532 atomColData.flucQFrc.end(), 0.0);
533 }
534
535 if (storageLayout_ & DataStorage::dslElectricField) {
536 fill(atomRowData.electricField.begin(),
537 atomRowData.electricField.end(), V3Zero);
538 fill(atomColData.electricField.begin(),
539 atomColData.electricField.end(), V3Zero);
540 }
541
549 #endif
550 // even in parallel, we need to zero out the local arrays:
551
552 if (storageLayout_ & DataStorage::dslParticlePot) {
553 fill(snap_->atomData.particlePot.begin(),
554 snap_->atomData.particlePot.end(), 0.0);
555 }
556
557 if (storageLayout_ & DataStorage::dslDensity) {
558 fill(snap_->atomData.density.begin(),
559 snap_->atomData.density.end(), 0.0);
560 }
561
562 if (storageLayout_ & DataStorage::dslFunctional) {
563 fill(snap_->atomData.functional.begin(),
564 snap_->atomData.functional.end(), 0.0);
565 }
566
567 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
568 fill(snap_->atomData.functionalDerivative.begin(),
569 snap_->atomData.functionalDerivative.end(), 0.0);
570 }
571
572 if (storageLayout_ & DataStorage::dslSkippedCharge) {
573 fill(snap_->atomData.skippedCharge.begin(),
574 snap_->atomData.skippedCharge.end(), 0.0);
575 }
576
577 if (storageLayout_ & DataStorage::dslElectricField) {
578 fill(snap_->atomData.electricField.begin(),
579 snap_->atomData.electricField.end(), V3Zero);
580 }
581 }
582
583
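/**
 * Gathers the current atomic and cutoff-group positions (and, when
 * stored, rotation matrices, electrostatic frames, and fluctuating
 * charges) into the row- and column-indexed arrays.
 */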
584 void ForceMatrixDecomposition::distributeData() {
585 snap_ = sman_->getCurrentSnapshot();
586 storageLayout_ = sman_->getStorageLayout();
587 #ifdef IS_MPI
588
589 // gather up the atomic positions
590 AtomPlanVectorRow->gather(snap_->atomData.position,
591 atomRowData.position);
592 AtomPlanVectorColumn->gather(snap_->atomData.position,
593 atomColData.position);
594
595 // gather up the cutoff group positions
596
597 cgPlanVectorRow->gather(snap_->cgData.position,
598 cgRowData.position);
599
600 cgPlanVectorColumn->gather(snap_->cgData.position,
601 cgColData.position);
602
603
604 // if needed, gather the atomic rotation matrices
605 if (storageLayout_ & DataStorage::dslAmat) {
606 AtomPlanMatrixRow->gather(snap_->atomData.aMat,
607 atomRowData.aMat);
608 AtomPlanMatrixColumn->gather(snap_->atomData.aMat,
609 atomColData.aMat);
610 }
611
612 // if needed, gather the atomic electrostatic frames
613 if (storageLayout_ & DataStorage::dslElectroFrame) {
614 AtomPlanMatrixRow->gather(snap_->atomData.electroFrame,
615 atomRowData.electroFrame);
616 AtomPlanMatrixColumn->gather(snap_->atomData.electroFrame,
617 atomColData.electroFrame);
618 }
619
620 // if needed, gather the atomic fluctuating charge values
621 if (storageLayout_ & DataStorage::dslFlucQPosition) {
622 AtomPlanRealRow->gather(snap_->atomData.flucQPos,
623 atomRowData.flucQPos);
624 AtomPlanRealColumn->gather(snap_->atomData.flucQPos,
625 atomColData.flucQPos);
626 }
627
628 #endif
629 }
630
631 /* collects information obtained during the pre-pair loop onto local
632 * data structures.
633 */
634 void ForceMatrixDecomposition::collectIntermediateData() {
635 snap_ = sman_->getCurrentSnapshot();
636 storageLayout_ = sman_->getStorageLayout();
637 #ifdef IS_MPI
638
639 if (storageLayout_ & DataStorage::dslDensity) {
640
641 AtomPlanRealRow->scatter(atomRowData.density,
642 snap_->atomData.density);
643
644 int n = snap_->atomData.density.size();
645 vector<RealType> rho_tmp(n, 0.0);
646 AtomPlanRealColumn->scatter(atomColData.density, rho_tmp);
647 for (int i = 0; i < n; i++)
648 snap_->atomData.density[i] += rho_tmp[i];
649 }
650
651 if (storageLayout_ & DataStorage::dslElectricField) {
652
653 AtomPlanVectorRow->scatter(atomRowData.electricField,
654 snap_->atomData.electricField);
655
656 int n = snap_->atomData.electricField.size();
657 vector<Vector3d> field_tmp(n, V3Zero);
658 AtomPlanVectorColumn->scatter(atomColData.electricField, field_tmp);
659 for (int i = 0; i < n; i++)
660 snap_->atomData.electricField[i] += field_tmp[i];
661 }
662 #endif
663 }
664
665 /*
666 * redistributes information obtained during the pre-pair loop out to
667 * row and column-indexed data structures
668 */
669 void ForceMatrixDecomposition::distributeIntermediateData() {
670 snap_ = sman_->getCurrentSnapshot();
671 storageLayout_ = sman_->getStorageLayout();
672 #ifdef IS_MPI
673 if (storageLayout_ & DataStorage::dslFunctional) {
674 AtomPlanRealRow->gather(snap_->atomData.functional,
675 atomRowData.functional);
676 AtomPlanRealColumn->gather(snap_->atomData.functional,
677 atomColData.functional);
678 }
679
680 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
681 AtomPlanRealRow->gather(snap_->atomData.functionalDerivative,
682 atomRowData.functionalDerivative);
683 AtomPlanRealColumn->gather(snap_->atomData.functionalDerivative,
684 atomColData.functionalDerivative);
685 }
686 #endif
687 }
688
689
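/**
 * Scatters the row- and column-indexed forces, torques, skipped
 * charges, and fluctuating-charge forces back onto the local atoms
 * and accumulates the potential terms across processors.
 */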
690 void ForceMatrixDecomposition::collectData() {
691 snap_ = sman_->getCurrentSnapshot();
692 storageLayout_ = sman_->getStorageLayout();
693 #ifdef IS_MPI
694 int n = snap_->atomData.force.size();
695 vector<Vector3d> frc_tmp(n, V3Zero);
696
697 AtomPlanVectorRow->scatter(atomRowData.force, frc_tmp);
698 for (int i = 0; i < n; i++) {
699 snap_->atomData.force[i] += frc_tmp[i];
700 frc_tmp[i] = 0.0;
701 }
702
703 AtomPlanVectorColumn->scatter(atomColData.force, frc_tmp);
704 for (int i = 0; i < n; i++) {
705 snap_->atomData.force[i] += frc_tmp[i];
706 }
707
708 if (storageLayout_ & DataStorage::dslTorque) {
709
710 int nt = snap_->atomData.torque.size();
711 vector<Vector3d> trq_tmp(nt, V3Zero);
712
713 AtomPlanVectorRow->scatter(atomRowData.torque, trq_tmp);
714 for (int i = 0; i < nt; i++) {
715 snap_->atomData.torque[i] += trq_tmp[i];
716 trq_tmp[i] = 0.0;
717 }
718
719 AtomPlanVectorColumn->scatter(atomColData.torque, trq_tmp);
720 for (int i = 0; i < nt; i++)
721 snap_->atomData.torque[i] += trq_tmp[i];
722 }
723
724 if (storageLayout_ & DataStorage::dslSkippedCharge) {
725
726 int ns = snap_->atomData.skippedCharge.size();
727 vector<RealType> skch_tmp(ns, 0.0);
728
729 AtomPlanRealRow->scatter(atomRowData.skippedCharge, skch_tmp);
730 for (int i = 0; i < ns; i++) {
731 snap_->atomData.skippedCharge[i] += skch_tmp[i];
732 skch_tmp[i] = 0.0;
733 }
734
735 AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp);
736 for (int i = 0; i < ns; i++)
737 snap_->atomData.skippedCharge[i] += skch_tmp[i];
738
739 }
740
741 if (storageLayout_ & DataStorage::dslFlucQForce) {
742
743 int nq = snap_->atomData.flucQFrc.size();
744 vector<RealType> fqfrc_tmp(nq, 0.0);
745
746 AtomPlanRealRow->scatter(atomRowData.flucQFrc, fqfrc_tmp);
747 for (int i = 0; i < nq; i++) {
748 snap_->atomData.flucQFrc[i] += fqfrc_tmp[i];
749 fqfrc_tmp[i] = 0.0;
750 }
751
752 AtomPlanRealColumn->scatter(atomColData.flucQFrc, fqfrc_tmp);
753 for (int i = 0; i < nq; i++)
754 snap_->atomData.flucQFrc[i] += fqfrc_tmp[i];
755
756 }
757
758 nLocal_ = snap_->getNumberOfAtoms();
759
760 vector<potVec> pot_temp(nLocal_,
761 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
762
763 // scatter/gather pot_row into the members of my column
764
765 AtomPlanPotRow->scatter(pot_row, pot_temp);
766
767 for (int ii = 0; ii < pot_temp.size(); ii++ )
768 pairwisePot += pot_temp[ii];
769
770 fill(pot_temp.begin(), pot_temp.end(),
771 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
772
773 AtomPlanPotColumn->scatter(pot_col, pot_temp);
774
775 for (int ii = 0; ii < pot_temp.size(); ii++ )
776 pairwisePot += pot_temp[ii];
777
778 for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
779 RealType ploc1 = pairwisePot[ii];
780 RealType ploc2 = 0.0;
781 MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
782 pairwisePot[ii] = ploc2;
783 }
784
785 for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
786 RealType ploc1 = embeddingPot[ii];
787 RealType ploc2 = 0.0;
788 MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
789 embeddingPot[ii] = ploc2;
790 }
791
792 #endif
793
794 }
795
796 int ForceMatrixDecomposition::getNAtomsInRow() {
797 #ifdef IS_MPI
798 return nAtomsInRow_;
799 #else
800 return nLocal_;
801 #endif
802 }
803
804 /**
805 * returns the list of atoms belonging to this group.
806 */
807 vector<int> ForceMatrixDecomposition::getAtomsInGroupRow(int cg1){
808 #ifdef IS_MPI
809 return groupListRow_[cg1];
810 #else
811 return groupList_[cg1];
812 #endif
813 }
814
815 vector<int> ForceMatrixDecomposition::getAtomsInGroupColumn(int cg2){
816 #ifdef IS_MPI
817 return groupListCol_[cg2];
818 #else
819 return groupList_[cg2];
820 #endif
821 }
822
823 Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1, int cg2){
824 Vector3d d;
825
826 #ifdef IS_MPI
827 d = cgColData.position[cg2] - cgRowData.position[cg1];
828 #else
829 d = snap_->cgData.position[cg2] - snap_->cgData.position[cg1];
830 #endif
831
832 snap_->wrapVector(d);
833 return d;
834 }
835
836
837 Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){
838
839 Vector3d d;
840
841 #ifdef IS_MPI
842 d = cgRowData.position[cg1] - atomRowData.position[atom1];
843 #else
844 d = snap_->cgData.position[cg1] - snap_->atomData.position[atom1];
845 #endif
846
847 snap_->wrapVector(d);
848 return d;
849 }
850
851 Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2, int cg2){
852 Vector3d d;
853
854 #ifdef IS_MPI
855 d = cgColData.position[cg2] - atomColData.position[atom2];
856 #else
857 d = snap_->cgData.position[cg2] - snap_->atomData.position[atom2];
858 #endif
859
860 snap_->wrapVector(d);
861 return d;
862 }
863
864 RealType ForceMatrixDecomposition::getMassFactorRow(int atom1) {
865 #ifdef IS_MPI
866 return massFactorsRow[atom1];
867 #else
868 return massFactors[atom1];
869 #endif
870 }
871
872 RealType ForceMatrixDecomposition::getMassFactorColumn(int atom2) {
873 #ifdef IS_MPI
874 return massFactorsCol[atom2];
875 #else
876 return massFactors[atom2];
877 #endif
878
879 }
880
881 Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1, int atom2){
882 Vector3d d;
883
884 #ifdef IS_MPI
885 d = atomColData.position[atom2] - atomRowData.position[atom1];
886 #else
887 d = snap_->atomData.position[atom2] - snap_->atomData.position[atom1];
888 #endif
889
890 snap_->wrapVector(d);
891 return d;
892 }
893
894 vector<int> ForceMatrixDecomposition::getExcludesForAtom(int atom1) {
895 return excludesForAtom[atom1];
896 }
897
898 /**
899 * We need to exclude some overcounted interactions that result from
900 * the parallel decomposition.
901 */
902 bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) {
903 int unique_id_1, unique_id_2;
904
905 #ifdef IS_MPI
906 // in MPI, we have to look up the unique IDs for each atom
907 unique_id_1 = AtomRowToGlobal[atom1];
908 unique_id_2 = AtomColToGlobal[atom2];
909 #else
910 unique_id_1 = AtomLocalToGlobal[atom1];
911 unique_id_2 = AtomLocalToGlobal[atom2];
912 #endif
913
914 if (unique_id_1 == unique_id_2) return true;
915
916 #ifdef IS_MPI
917 // this prevents us from doing the pair on multiple processors
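// each unordered pair shows up in two orderings across the force
// matrix; keeping one ordering for even index sums and the other for
// odd sums computes each pair exactly once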
918 if (unique_id_1 < unique_id_2) {
919 if ((unique_id_1 + unique_id_2) % 2 == 0) return true;
920 } else {
921 if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
922 }
923 #endif
924
925 return false;
926 }
927
928 /**
929 * We need to handle the interactions for atoms that are involved in
930 * the same rigid body as well as some short-range interactions
931 * (bonds, bends, torsions) differently from other interactions.
932 * We'll still visit the pairwise routines, but with a flag that
933 * tells those routines to exclude the pair from direct long range
934 * interactions. Some indirect interactions (notably reaction
935 * field) must still be handled for these pairs.
936 */
937 bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) {
938
939 // excludesForAtom was constructed to use row/column indices in the MPI
940 // version, and to use local IDs in the non-MPI version:
941
942 for (vector<int>::iterator i = excludesForAtom[atom1].begin();
943 i != excludesForAtom[atom1].end(); ++i) {
944 if ( (*i) == atom2 ) return true;
945 }
946
947 return false;
948 }
949
950
951 void ForceMatrixDecomposition::addForceToAtomRow(int atom1, Vector3d fg){
952 #ifdef IS_MPI
953 atomRowData.force[atom1] += fg;
954 #else
955 snap_->atomData.force[atom1] += fg;
956 #endif
957 }
958
959 void ForceMatrixDecomposition::addForceToAtomColumn(int atom2, Vector3d fg){
960 #ifdef IS_MPI
961 atomColData.force[atom2] += fg;
962 #else
963 snap_->atomData.force[atom2] += fg;
964 #endif
965 }
966
967 // filling interaction blocks with pointers
968 void ForceMatrixDecomposition::fillInteractionData(InteractionData &idat,
969 int atom1, int atom2) {
970
971 idat.excluded = excludeAtomPair(atom1, atom2);
972
973 #ifdef IS_MPI
974 idat.atypes = make_pair( atypesRow[atom1], atypesCol[atom2]);
975 //idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]),
976 // ff_->getAtomType(identsCol[atom2]) );
977
978 if (storageLayout_ & DataStorage::dslAmat) {
979 idat.A1 = &(atomRowData.aMat[atom1]);
980 idat.A2 = &(atomColData.aMat[atom2]);
981 }
982
983 if (storageLayout_ & DataStorage::dslElectroFrame) {
984 idat.eFrame1 = &(atomRowData.electroFrame[atom1]);
985 idat.eFrame2 = &(atomColData.electroFrame[atom2]);
986 }
987
988 if (storageLayout_ & DataStorage::dslTorque) {
989 idat.t1 = &(atomRowData.torque[atom1]);
990 idat.t2 = &(atomColData.torque[atom2]);
991 }
992
993 if (storageLayout_ & DataStorage::dslDensity) {
994 idat.rho1 = &(atomRowData.density[atom1]);
995 idat.rho2 = &(atomColData.density[atom2]);
996 }
997
998 if (storageLayout_ & DataStorage::dslFunctional) {
999 idat.frho1 = &(atomRowData.functional[atom1]);
1000 idat.frho2 = &(atomColData.functional[atom2]);
1001 }
1002
1003 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1004 idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
1005 idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
1006 }
1007
1008 if (storageLayout_ & DataStorage::dslParticlePot) {
1009 idat.particlePot1 = &(atomRowData.particlePot[atom1]);
1010 idat.particlePot2 = &(atomColData.particlePot[atom2]);
1011 }
1012
1013 if (storageLayout_ & DataStorage::dslSkippedCharge) {
1014 idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
1015 idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
1016 }
1017
1018 if (storageLayout_ & DataStorage::dslFlucQPosition) {
1019 idat.flucQ1 = &(atomRowData.flucQPos[atom1]);
1020 idat.flucQ2 = &(atomColData.flucQPos[atom2]);
1021 }
1022
1023 #else
1024
1025
1026 // cerr << "atoms = " << atom1 << " " << atom2 << "\n";
1027 // cerr << "pos1 = " << snap_->atomData.position[atom1] << "\n";
1028 // cerr << "pos2 = " << snap_->atomData.position[atom2] << "\n";
1029
1030 idat.atypes = make_pair( atypesLocal[atom1], atypesLocal[atom2]);
1031 //idat.atypes = make_pair( ff_->getAtomType(idents[atom1]),
1032 // ff_->getAtomType(idents[atom2]) );
1033
1034 if (storageLayout_ & DataStorage::dslAmat) {
1035 idat.A1 = &(snap_->atomData.aMat[atom1]);
1036 idat.A2 = &(snap_->atomData.aMat[atom2]);
1037 }
1038
1039 if (storageLayout_ & DataStorage::dslElectroFrame) {
1040 idat.eFrame1 = &(snap_->atomData.electroFrame[atom1]);
1041 idat.eFrame2 = &(snap_->atomData.electroFrame[atom2]);
1042 }
1043
1044 if (storageLayout_ & DataStorage::dslTorque) {
1045 idat.t1 = &(snap_->atomData.torque[atom1]);
1046 idat.t2 = &(snap_->atomData.torque[atom2]);
1047 }
1048
1049 if (storageLayout_ & DataStorage::dslDensity) {
1050 idat.rho1 = &(snap_->atomData.density[atom1]);
1051 idat.rho2 = &(snap_->atomData.density[atom2]);
1052 }
1053
1054 if (storageLayout_ & DataStorage::dslFunctional) {
1055 idat.frho1 = &(snap_->atomData.functional[atom1]);
1056 idat.frho2 = &(snap_->atomData.functional[atom2]);
1057 }
1058
1059 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1060 idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
1061 idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
1062 }
1063
1064 if (storageLayout_ & DataStorage::dslParticlePot) {
1065 idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
1066 idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
1067 }
1068
1069 if (storageLayout_ & DataStorage::dslSkippedCharge) {
1070 idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
1071 idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
1072 }
1073
1074 if (storageLayout_ & DataStorage::dslFlucQPosition) {
1075 idat.flucQ1 = &(snap_->atomData.flucQPos[atom1]);
1076 idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]);
1077 }
1078
1079 #endif
1080 }
1081
1082
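/**
 * Accumulates the pair potential, forces, and any optional terms
 * (fluctuating-charge forces, electric fields, particle potentials)
 * from the interaction data back into row/column or local storage.
 */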
1083 void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) {
1084 #ifdef IS_MPI
1085 pot_row[atom1] += RealType(0.5) * *(idat.pot);
1086 pot_col[atom2] += RealType(0.5) * *(idat.pot);
1087
1088 atomRowData.force[atom1] += *(idat.f1);
1089 atomColData.force[atom2] -= *(idat.f1);
1090
1091 if (storageLayout_ & DataStorage::dslFlucQForce) {
1092 atomRowData.flucQFrc[atom1] += *(idat.dVdFQ1);
1093 atomColData.flucQFrc[atom2] += *(idat.dVdFQ2);
1094 }
1095
1096 if (storageLayout_ & DataStorage::dslElectricField) {
1097 atomRowData.electricField[atom1] += *(idat.eField1);
1098 atomColData.electricField[atom2] += *(idat.eField2);
1099 }
1100
1101 // should particle pot be done here also?
1102 #else
1103 pairwisePot += *(idat.pot);
1104
1105 snap_->atomData.force[atom1] += *(idat.f1);
1106 snap_->atomData.force[atom2] -= *(idat.f1);
1107
1108 if (idat.doParticlePot) {
1109 snap_->atomData.particlePot[atom1] += *(idat.vpair) * *(idat.sw);
1110 snap_->atomData.particlePot[atom2] -= *(idat.vpair) * *(idat.sw);
1111 }
1112
1113 if (storageLayout_ & DataStorage::dslFlucQForce) {
1114 snap_->atomData.flucQFrc[atom1] += *(idat.dVdFQ1);
1115 snap_->atomData.flucQFrc[atom2] -= *(idat.dVdFQ2);
1116 }
1117
1118 if (storageLayout_ & DataStorage::dslElectricField) {
1119 snap_->atomData.electricField[atom1] += *(idat.eField1);
1120 snap_->atomData.electricField[atom2] += *(idat.eField2);
1121 }
1122
1123 #endif
1124
1125 }
1126
1127 /*
1128 * buildNeighborList
1129 *
1130 * first element of pair is row-indexed CutoffGroup
1131 * second element of pair is column-indexed CutoffGroup
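 *
 * Partitions the box into cells at least (largestRcut_ +
 * skinThickness_) wide and draws candidate pairs only from
 * neighboring cells; small boxes fall back to an all-pairs search.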
1132 */
1133 vector<pair<int, int> > ForceMatrixDecomposition::buildNeighborList() {
1134
1135 vector<pair<int, int> > neighborList;
1136 groupCutoffs cuts;
1137 bool doAllPairs = false;
1138
1139 #ifdef IS_MPI
1140 cellListRow_.clear();
1141 cellListCol_.clear();
1142 #else
1143 cellList_.clear();
1144 #endif
1145
1146 RealType rList_ = (largestRcut_ + skinThickness_);
1147 RealType rl2 = rList_ * rList_;
1148 Snapshot* snap_ = sman_->getCurrentSnapshot();
1149 Mat3x3d Hmat = snap_->getHmat();
1150 Vector3d Hx = Hmat.getColumn(0);
1151 Vector3d Hy = Hmat.getColumn(1);
1152 Vector3d Hz = Hmat.getColumn(2);
1153
1154 nCells_.x() = (int) ( Hx.length() / rList_ );
1155 nCells_.y() = (int) ( Hy.length() / rList_ );
1156 nCells_.z() = (int) ( Hz.length() / rList_ );
1157
1158 // handle small boxes where the cell offsets can end up repeating cells
1159
1160 if (nCells_.x() < 3) doAllPairs = true;
1161 if (nCells_.y() < 3) doAllPairs = true;
1162 if (nCells_.z() < 3) doAllPairs = true;
1163
1164 Mat3x3d invHmat = snap_->getInvHmat();
1165 Vector3d rs, scaled, dr;
1166 Vector3i whichCell;
1167 int cellIndex;
1168 int nCtot = nCells_.x() * nCells_.y() * nCells_.z();
1169
1170 #ifdef IS_MPI
1171 cellListRow_.resize(nCtot);
1172 cellListCol_.resize(nCtot);
1173 #else
1174 cellList_.resize(nCtot);
1175 #endif
1176
1177 if (!doAllPairs) {
1178 #ifdef IS_MPI
1179
1180 for (int i = 0; i < nGroupsInRow_; i++) {
1181 rs = cgRowData.position[i];
1182
1183 // scaled positions relative to the box vectors
1184 scaled = invHmat * rs;
1185
1186 // wrap the vector back into the unit box by subtracting integer box
1187 // numbers
1188 for (int j = 0; j < 3; j++) {
1189 scaled[j] -= roundMe(scaled[j]);
1190 scaled[j] += 0.5;
1191 }
1192
1193 // find xyz-indices of cell that cutoffGroup is in.
1194 whichCell.x() = nCells_.x() * scaled.x();
1195 whichCell.y() = nCells_.y() * scaled.y();
1196 whichCell.z() = nCells_.z() * scaled.z();
1197
1198 // find single index of this cell:
1199 cellIndex = Vlinear(whichCell, nCells_);
1200
1201 // add this cutoff group to the list of groups in this cell;
1202 cellListRow_[cellIndex].push_back(i);
1203 }
1204 for (int i = 0; i < nGroupsInCol_; i++) {
1205 rs = cgColData.position[i];
1206
1207 // scaled positions relative to the box vectors
1208 scaled = invHmat * rs;
1209
1210 // wrap the vector back into the unit box by subtracting integer box
1211 // numbers
1212 for (int j = 0; j < 3; j++) {
1213 scaled[j] -= roundMe(scaled[j]);
1214 scaled[j] += 0.5;
1215 }
1216
1217 // find xyz-indices of cell that cutoffGroup is in.
1218 whichCell.x() = nCells_.x() * scaled.x();
1219 whichCell.y() = nCells_.y() * scaled.y();
1220 whichCell.z() = nCells_.z() * scaled.z();
1221
1222 // find single index of this cell:
1223 cellIndex = Vlinear(whichCell, nCells_);
1224
1225 // add this cutoff group to the list of groups in this cell;
1226 cellListCol_[cellIndex].push_back(i);
1227 }
1228
1229 #else
1230 for (int i = 0; i < nGroups_; i++) {
1231 rs = snap_->cgData.position[i];
1232
1233 // scaled positions relative to the box vectors
1234 scaled = invHmat * rs;
1235
1236 // wrap the vector back into the unit box by subtracting integer box
1237 // numbers
1238 for (int j = 0; j < 3; j++) {
1239 scaled[j] -= roundMe(scaled[j]);
1240 scaled[j] += 0.5;
1241 }
1242
1243 // find xyz-indices of cell that cutoffGroup is in.
1244 whichCell.x() = nCells_.x() * scaled.x();
1245 whichCell.y() = nCells_.y() * scaled.y();
1246 whichCell.z() = nCells_.z() * scaled.z();
1247
1248 // find single index of this cell:
1249 cellIndex = Vlinear(whichCell, nCells_);
1250
1251 // add this cutoff group to the list of groups in this cell;
1252 cellList_[cellIndex].push_back(i);
1253 }
1254
1255 #endif
1256
1257 for (int m1z = 0; m1z < nCells_.z(); m1z++) {
1258 for (int m1y = 0; m1y < nCells_.y(); m1y++) {
1259 for (int m1x = 0; m1x < nCells_.x(); m1x++) {
1260 Vector3i m1v(m1x, m1y, m1z);
1261 int m1 = Vlinear(m1v, nCells_);
1262
1263 for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1264 os != cellOffsets_.end(); ++os) {
1265
1266 Vector3i m2v = m1v + (*os);
1267
1268
1269 if (m2v.x() >= nCells_.x()) {
1270 m2v.x() = 0;
1271 } else if (m2v.x() < 0) {
1272 m2v.x() = nCells_.x() - 1;
1273 }
1274
1275 if (m2v.y() >= nCells_.y()) {
1276 m2v.y() = 0;
1277 } else if (m2v.y() < 0) {
1278 m2v.y() = nCells_.y() - 1;
1279 }
1280
1281 if (m2v.z() >= nCells_.z()) {
1282 m2v.z() = 0;
1283 } else if (m2v.z() < 0) {
1284 m2v.z() = nCells_.z() - 1;
1285 }
1286
1287 int m2 = Vlinear (m2v, nCells_);
1288
1289 #ifdef IS_MPI
1290 for (vector<int>::iterator j1 = cellListRow_[m1].begin();
1291 j1 != cellListRow_[m1].end(); ++j1) {
1292 for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1293 j2 != cellListCol_[m2].end(); ++j2) {
1294
1295 // In parallel, we need to visit *all* pairs of row
1296 // & column indices and will divide labor in the
1297 // force evaluation later.
1298 dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)];
1299 snap_->wrapVector(dr);
1300 cuts = getGroupCutoffs( (*j1), (*j2) );
1301 if (dr.lengthSquare() < cuts.third) {
1302 neighborList.push_back(make_pair((*j1), (*j2)));
1303 }
1304 }
1305 }
1306 #else
1307 for (vector<int>::iterator j1 = cellList_[m1].begin();
1308 j1 != cellList_[m1].end(); ++j1) {
1309 for (vector<int>::iterator j2 = cellList_[m2].begin();
1310 j2 != cellList_[m2].end(); ++j2) {
1311
1312 // Always do this if we're in different cells or if
1313 // we're in the same cell and the global index of
1314 // the j2 cutoff group is greater than or equal to
1315 * the j1 cutoff group. Note that Rapaport's code
1316 // has a "less than" conditional here, but that
1317 // deals with atom-by-atom computation. OpenMD
1318 // allows atoms within a single cutoff group to
1319 // interact with each other.
1320
1321
1322
1323 if (m2 != m1 || (*j2) >= (*j1) ) {
1324
1325 dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
1326 snap_->wrapVector(dr);
1327 cuts = getGroupCutoffs( (*j1), (*j2) );
1328 if (dr.lengthSquare() < cuts.third) {
1329 neighborList.push_back(make_pair((*j1), (*j2)));
1330 }
1331 }
1332 }
1333 }
1334 #endif
1335 }
1336 }
1337 }
1338 }
1339 } else {
1340 // branch to do all cutoff group pairs
1341 #ifdef IS_MPI
1342 for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1343 for (int j2 = 0; j2 < nGroupsInCol_; j2++) {
1344 dr = cgColData.position[j2] - cgRowData.position[j1];
1345 snap_->wrapVector(dr);
1346 cuts = getGroupCutoffs( j1, j2 );
1347 if (dr.lengthSquare() < cuts.third) {
1348 neighborList.push_back(make_pair(j1, j2));
1349 }
1350 }
1351 }
1352 #else
1353 // include all groups here.
1354 for (int j1 = 0; j1 < nGroups_; j1++) {
1355 // include self group interactions j2 == j1
1356 for (int j2 = j1; j2 < nGroups_; j2++) {
1357 dr = snap_->cgData.position[j2] - snap_->cgData.position[j1];
1358 snap_->wrapVector(dr);
1359 cuts = getGroupCutoffs( j1, j2 );
1360 if (dr.lengthSquare() < cuts.third) {
1361 neighborList.push_back(make_pair(j1, j2));
1362 }
1363 }
1364 }
1365 #endif
1366 }
1367
1368 // save the local cutoff group positions for the check that is
1369 // done on each loop:
1370 saved_CG_positions_.clear();
1371 for (int i = 0; i < nGroups_; i++)
1372 saved_CG_positions_.push_back(snap_->cgData.position[i]);
1373
1374 return neighborList;
1375 }
1376 } //end namespace OpenMD