root/OpenMD/branches/development/src/parallel/ForceMatrixDecomposition.cpp
Revision: 1592
Committed: Tue Jul 12 20:33:14 2011 UTC by gezelter
File size: 39919 byte(s)
Log Message:
test

File Contents

# Content
1 /*
2 * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
3 *
4 * The University of Notre Dame grants you ("Licensee") a
5 * non-exclusive, royalty free, license to use, modify and
6 * redistribute this software in source and binary code form, provided
7 * that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the
15 * distribution.
16 *
17 * This software is provided "AS IS," without a warranty of any
18 * kind. All express or implied conditions, representations and
19 * warranties, including any implied warranty of merchantability,
20 * fitness for a particular purpose or non-infringement, are hereby
21 * excluded. The University of Notre Dame and its licensors shall not
22 * be liable for any damages suffered by licensee as a result of
23 * using, modifying or distributing the software or its
24 * derivatives. In no event will the University of Notre Dame or its
25 * licensors be liable for any lost revenue, profit or data, or for
26 * direct, indirect, special, consequential, incidental or punitive
27 * damages, however caused and regardless of the theory of liability,
28 * arising out of the use of or inability to use software, even if the
29 * University of Notre Dame has been advised of the possibility of
30 * such damages.
31 *
32 * SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
33 * research, please cite the appropriate papers when you publish your
34 * work. Good starting points are:
35 *
36 * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
37 * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
38 * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008).
39 * [4] Vardeman & Gezelter, in progress (2009).
40 */
41 #include "parallel/ForceMatrixDecomposition.hpp"
42 #include "math/SquareMatrix3.hpp"
43 #include "nonbonded/NonBondedInteraction.hpp"
44 #include "brains/SnapshotManager.hpp"
45 #include "brains/PairList.hpp"
46
47 using namespace std;
48 namespace OpenMD {
49
50 /**
51 * distributeInitialData is essentially a copy of the older Fortran
52 * SimulationSetup routine.
53 */
54
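/*
 * A brief sketch of the layout this routine sets up (an editorial
 * illustration, not taken verbatim from the OpenMD design documents):
 * in the force-matrix decomposition, the N x N pair-interaction matrix
 * is blocked over a processor grid.  Each processor gathers the atoms
 * held by every processor sharing its grid row (nAtomsInRow_) and every
 * processor sharing its grid column (nAtomsInCol_), and is responsible
 * for the pairs (i, j) with i drawn from its row set and j from its
 * column set.  For example, with 9 processors on a hypothetical 3 x 3
 * grid and 9000 atoms spread evenly, each processor would hold ~1000
 * local atoms but gather ~3000 row atoms and ~3000 column atoms.
 */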
55 void ForceMatrixDecomposition::distributeInitialData() {
56 snap_ = sman_->getCurrentSnapshot();
57 storageLayout_ = sman_->getStorageLayout();
58 ff_ = info_->getForceField();
59 nLocal_ = snap_->getNumberOfAtoms();
60
61 nGroups_ = info_->getNLocalCutoffGroups();
62 // gather the information for atomtype IDs (atids):
63 idents = info_->getIdentArray();
64 AtomLocalToGlobal = info_->getGlobalAtomIndices();
65 cgLocalToGlobal = info_->getGlobalGroupIndices();
66 vector<int> globalGroupMembership = info_->getGlobalGroupMembership();
67
68 massFactors = info_->getMassFactors();
69
70 PairList* excludes = info_->getExcludedInteractions();
71 PairList* oneTwo = info_->getOneTwoInteractions();
72 PairList* oneThree = info_->getOneThreeInteractions();
73 PairList* oneFour = info_->getOneFourInteractions();
74
75 #ifdef IS_MPI
76
77 AtomCommIntRow = new Communicator<Row,int>(nLocal_);
78 AtomCommRealRow = new Communicator<Row,RealType>(nLocal_);
79 AtomCommVectorRow = new Communicator<Row,Vector3d>(nLocal_);
80 AtomCommMatrixRow = new Communicator<Row,Mat3x3d>(nLocal_);
81 AtomCommPotRow = new Communicator<Row,potVec>(nLocal_);
82
83 AtomCommIntColumn = new Communicator<Column,int>(nLocal_);
84 AtomCommRealColumn = new Communicator<Column,RealType>(nLocal_);
85 AtomCommVectorColumn = new Communicator<Column,Vector3d>(nLocal_);
86 AtomCommMatrixColumn = new Communicator<Column,Mat3x3d>(nLocal_);
87 AtomCommPotColumn = new Communicator<Column,potVec>(nLocal_);
88
89 cgCommIntRow = new Communicator<Row,int>(nGroups_);
90 cgCommVectorRow = new Communicator<Row,Vector3d>(nGroups_);
91 cgCommIntColumn = new Communicator<Column,int>(nGroups_);
92 cgCommVectorColumn = new Communicator<Column,Vector3d>(nGroups_);
93
94 nAtomsInRow_ = AtomCommIntRow->getSize();
95 nAtomsInCol_ = AtomCommIntColumn->getSize();
96 nGroupsInRow_ = cgCommIntRow->getSize();
97 nGroupsInCol_ = cgCommIntColumn->getSize();
98
99 // Modify the data storage objects with the correct layouts and sizes:
100 atomRowData.resize(nAtomsInRow_);
101 atomRowData.setStorageLayout(storageLayout_);
102 atomColData.resize(nAtomsInCol_);
103 atomColData.setStorageLayout(storageLayout_);
104 cgRowData.resize(nGroupsInRow_);
105 cgRowData.setStorageLayout(DataStorage::dslPosition);
106 cgColData.resize(nGroupsInCol_);
107 cgColData.setStorageLayout(DataStorage::dslPosition);
108
109 identsRow.resize(nAtomsInRow_);
110 identsCol.resize(nAtomsInCol_);
111
112 AtomCommIntRow->gather(idents, identsRow);
113 AtomCommIntColumn->gather(idents, identsCol);
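// gather() here appears to concatenate each processor's local array
// across the corresponding communicator: identsRow ends up holding the
// atom-type idents of every atom owned by any processor in this
// processor's row (nAtomsInRow_ entries), and identsCol the same for
// the column.  (This reading is inferred from the sizes used above;
// the Communicator<Row,...>/<Column,...> implementation lives elsewhere.)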
114
115 // allocate memory for the parallel objects
116 atypesRow.resize(nAtomsInRow_);
117 atypesCol.resize(nAtomsInCol_);
118
119 for (int i = 0; i < nAtomsInRow_; i++)
120 atypesRow[i] = ff_->getAtomType(identsRow[i]);
121 for (int i = 0; i < nAtomsInCol_; i++)
122 atypesCol[i] = ff_->getAtomType(identsCol[i]);
123
124 pot_row.resize(nAtomsInRow_);
125 pot_col.resize(nAtomsInCol_);
126
127 AtomRowToGlobal.resize(nAtomsInRow_);
128 AtomColToGlobal.resize(nAtomsInCol_);
129 AtomCommIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
130 AtomCommIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);
131
132 cgRowToGlobal.resize(nGroupsInRow_);
133 cgColToGlobal.resize(nGroupsInCol_);
134 cgCommIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
135 cgCommIntColumn->gather(cgLocalToGlobal, cgColToGlobal);
136
137 massFactorsRow.resize(nAtomsInRow_);
138 massFactorsCol.resize(nAtomsInCol_);
139 AtomCommRealRow->gather(massFactors, massFactorsRow);
140 AtomCommRealColumn->gather(massFactors, massFactorsCol);
141
142 groupListRow_.clear();
143 groupListRow_.resize(nGroupsInRow_);
144 for (int i = 0; i < nGroupsInRow_; i++) {
145 int gid = cgRowToGlobal[i];
146 for (int j = 0; j < nAtomsInRow_; j++) {
147 int aid = AtomRowToGlobal[j];
148 if (globalGroupMembership[aid] == gid)
149 groupListRow_[i].push_back(j);
150 }
151 }
152
153 groupListCol_.clear();
154 groupListCol_.resize(nGroupsInCol_);
155 for (int i = 0; i < nGroupsInCol_; i++) {
156 int gid = cgColToGlobal[i];
157 for (int j = 0; j < nAtomsInCol_; j++) {
158 int aid = AtomColToGlobal[j];
159 if (globalGroupMembership[aid] == gid)
160 groupListCol_[i].push_back(j);
161 }
162 }
163
164 excludesForAtom.clear();
165 excludesForAtom.resize(nAtomsInRow_);
166 toposForAtom.clear();
167 toposForAtom.resize(nAtomsInRow_);
168 topoDist.clear();
169 topoDist.resize(nAtomsInRow_);
170 for (int i = 0; i < nAtomsInRow_; i++) {
171 int iglob = AtomRowToGlobal[i];
172
173 for (int j = 0; j < nAtomsInCol_; j++) {
174 int jglob = AtomColToGlobal[j];
175
176 if (excludes->hasPair(iglob, jglob))
177 excludesForAtom[i].push_back(j);
178
179 if (oneTwo->hasPair(iglob, jglob)) {
180 toposForAtom[i].push_back(j);
181 topoDist[i].push_back(1);
182 } else {
183 if (oneThree->hasPair(iglob, jglob)) {
184 toposForAtom[i].push_back(j);
185 topoDist[i].push_back(2);
186 } else {
187 if (oneFour->hasPair(iglob, jglob)) {
188 toposForAtom[i].push_back(j);
189 topoDist[i].push_back(3);
190 }
191 }
192 }
193 }
194 }
195
196 #endif
197
198 // allocate memory for the parallel objects
199 atypesLocal.resize(nLocal_);
200
201 for (int i = 0; i < nLocal_; i++)
202 atypesLocal[i] = ff_->getAtomType(idents[i]);
203
204 groupList_.clear();
205 groupList_.resize(nGroups_);
206 for (int i = 0; i < nGroups_; i++) {
207 int gid = cgLocalToGlobal[i];
208 for (int j = 0; j < nLocal_; j++) {
209 int aid = AtomLocalToGlobal[j];
210 if (globalGroupMembership[aid] == gid) {
211 groupList_[i].push_back(j);
212 }
213 }
214 }
215
216 excludesForAtom.clear();
217 excludesForAtom.resize(nLocal_);
218 toposForAtom.clear();
219 toposForAtom.resize(nLocal_);
220 topoDist.clear();
221 topoDist.resize(nLocal_);
222
223 for (int i = 0; i < nLocal_; i++) {
224 int iglob = AtomLocalToGlobal[i];
225
226 for (int j = 0; j < nLocal_; j++) {
227 int jglob = AtomLocalToGlobal[j];
228
229 if (excludes->hasPair(iglob, jglob))
230 excludesForAtom[i].push_back(j);
231
232 if (oneTwo->hasPair(iglob, jglob)) {
233 toposForAtom[i].push_back(j);
234 topoDist[i].push_back(1);
235 } else {
236 if (oneThree->hasPair(iglob, jglob)) {
237 toposForAtom[i].push_back(j);
238 topoDist[i].push_back(2);
239 } else {
240 if (oneFour->hasPair(iglob, jglob)) {
241 toposForAtom[i].push_back(j);
242 topoDist[i].push_back(3);
243 }
244 }
245 }
246 }
247 }
248
249 createGtypeCutoffMap();
250
251 }
252
253 void ForceMatrixDecomposition::createGtypeCutoffMap() {
254
255 RealType tol = 1e-6;
256 largestRcut_ = 0.0;
257 RealType rc;
258 int atid;
259 set<AtomType*> atypes = info_->getSimulatedAtomTypes();
260
261 map<int, RealType> atypeCutoff;
262
263 for (set<AtomType*>::iterator at = atypes.begin();
264 at != atypes.end(); ++at){
265 atid = (*at)->getIdent();
266 if (userChoseCutoff_)
267 atypeCutoff[atid] = userCutoff_;
268 else
269 atypeCutoff[atid] = interactionMan_->getSuggestedCutoffRadius(*at);
270 }
271
272 vector<RealType> gTypeCutoffs;
273 // first we do a single loop over the cutoff groups to find the
274 // largest cutoff for any atype present in each group.
275 #ifdef IS_MPI
276 vector<RealType> groupCutoffRow(nGroupsInRow_, 0.0);
277 groupRowToGtype.resize(nGroupsInRow_);
278 for (int cg1 = 0; cg1 < nGroupsInRow_; cg1++) {
279 vector<int> atomListRow = getAtomsInGroupRow(cg1);
280 for (vector<int>::iterator ia = atomListRow.begin();
281 ia != atomListRow.end(); ++ia) {
282 int atom1 = (*ia);
283 atid = identsRow[atom1];
284 if (atypeCutoff[atid] > groupCutoffRow[cg1]) {
285 groupCutoffRow[cg1] = atypeCutoff[atid];
286 }
287 }
288
289 bool gTypeFound = false;
290 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
291 if (abs(groupCutoffRow[cg1] - gTypeCutoffs[gt]) < tol) {
292 groupRowToGtype[cg1] = gt;
293 gTypeFound = true;
294 }
295 }
296 if (!gTypeFound) {
297 gTypeCutoffs.push_back( groupCutoffRow[cg1] );
298 groupRowToGtype[cg1] = gTypeCutoffs.size() - 1;
299 }
300
301 }
302 vector<RealType> groupCutoffCol(nGroupsInCol_, 0.0);
303 groupColToGtype.resize(nGroupsInCol_);
304 for (int cg2 = 0; cg2 < nGroupsInCol_; cg2++) {
305 vector<int> atomListCol = getAtomsInGroupColumn(cg2);
306 for (vector<int>::iterator jb = atomListCol.begin();
307 jb != atomListCol.end(); ++jb) {
308 int atom2 = (*jb);
309 atid = identsCol[atom2];
310 if (atypeCutoff[atid] > groupCutoffCol[cg2]) {
311 groupCutoffCol[cg2] = atypeCutoff[atid];
312 }
313 }
314 bool gTypeFound = false;
315 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
316 if (abs(groupCutoffCol[cg2] - gTypeCutoffs[gt]) < tol) {
317 groupColToGtype[cg2] = gt;
318 gTypeFound = true;
319 }
320 }
321 if (!gTypeFound) {
322 gTypeCutoffs.push_back( groupCutoffCol[cg2] );
323 groupColToGtype[cg2] = gTypeCutoffs.size() - 1;
324 }
325 }
326 #else
327
328 vector<RealType> groupCutoff(nGroups_, 0.0);
329 groupToGtype.resize(nGroups_);
330 for (int cg1 = 0; cg1 < nGroups_; cg1++) {
331 groupCutoff[cg1] = 0.0;
332 vector<int> atomList = getAtomsInGroupRow(cg1);
333 for (vector<int>::iterator ia = atomList.begin();
334 ia != atomList.end(); ++ia) {
335 int atom1 = (*ia);
336 atid = idents[atom1];
337 if (atypeCutoff[atid] > groupCutoff[cg1])
338 groupCutoff[cg1] = atypeCutoff[atid];
339 }
340
341 bool gTypeFound = false;
342 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
343 if (abs(groupCutoff[cg1] - gTypeCutoffs[gt]) < tol) {
344 groupToGtype[cg1] = gt;
345 gTypeFound = true;
346 }
347 }
348 if (!gTypeFound) {
349 gTypeCutoffs.push_back( groupCutoff[cg1] );
350 groupToGtype[cg1] = gTypeCutoffs.size() - 1;
351 }
352 }
353 #endif
354
355 // Now we find the maximum group cutoff value present in the simulation
356
357 RealType groupMax = *max_element(gTypeCutoffs.begin(),
358 gTypeCutoffs.end());
359
360 #ifdef IS_MPI
361 MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &groupMax, 1, MPI::REALTYPE,
362 MPI::MAX); // in-place reduction; aliased send/recv buffers are not valid MPI
363 #endif
364
365 RealType tradRcut = groupMax;
366
367 for (int i = 0; i < gTypeCutoffs.size(); i++) {
368 for (int j = 0; j < gTypeCutoffs.size(); j++) {
369 RealType thisRcut;
370 switch(cutoffPolicy_) {
371 case TRADITIONAL:
372 thisRcut = tradRcut;
373 break;
374 case MIX:
375 thisRcut = 0.5 * (gTypeCutoffs[i] + gTypeCutoffs[j]);
376 break;
377 case MAX:
378 thisRcut = max(gTypeCutoffs[i], gTypeCutoffs[j]);
379 break;
380 default:
381 sprintf(painCave.errMsg,
382 "ForceMatrixDecomposition::createGtypeCutoffMap "
383 "hit an unknown cutoff policy!\n");
384 painCave.severity = OPENMD_ERROR;
385 painCave.isFatal = 1;
386 simError();
387 break;
388 }
389
390 pair<int,int> key = make_pair(i,j);
391 gTypeCutoffMap[key].first = thisRcut;
392 if (thisRcut > largestRcut_) largestRcut_ = thisRcut;
393 gTypeCutoffMap[key].second = thisRcut*thisRcut;
394 gTypeCutoffMap[key].third = pow(thisRcut + skinThickness_, 2);
395 // sanity check
396
397 if (userChoseCutoff_) {
398 if (abs(gTypeCutoffMap[key].first - userCutoff_) > 0.0001) {
399 sprintf(painCave.errMsg,
400 "ForceMatrixDecomposition::createGtypeCutoffMap "
401 "user-specified rCut (%lf) does not match computed group Cutoff\n", userCutoff_);
402 painCave.severity = OPENMD_ERROR;
403 painCave.isFatal = 1;
404 simError();
405 }
406 }
407 }
408 }
409 }
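/*
 * Worked example of the cutoff policies above (illustrative numbers):
 * for two group types with cutoffs 9.0 and 12.0 angstroms,
 *   TRADITIONAL -> every pair uses the global maximum, 12.0;
 *   MIX         -> the mixed pair uses 0.5 * (9.0 + 12.0) = 10.5;
 *   MAX         -> the mixed pair uses max(9.0, 12.0) = 12.0.
 * For each (i, j) key, gTypeCutoffMap stores the cutoff itself (.first),
 * its square (.second), and the squared neighbor-list radius
 * (cutoff + skinThickness_)^2 (.third).
 */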
410
411
412 groupCutoffs ForceMatrixDecomposition::getGroupCutoffs(int cg1, int cg2) {
413 int i, j;
414 #ifdef IS_MPI
415 i = groupRowToGtype[cg1];
416 j = groupColToGtype[cg2];
417 #else
418 i = groupToGtype[cg1];
419 j = groupToGtype[cg2];
420 #endif
421 return gTypeCutoffMap[make_pair(i,j)];
422 }
423
424 int ForceMatrixDecomposition::getTopologicalDistance(int atom1, int atom2) {
425 for (int j = 0; j < toposForAtom[atom1].size(); j++) {
426 if (toposForAtom[atom1][j] == atom2)
427 return topoDist[atom1][j];
428 }
429 return 0;
430 }
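// Return values follow the encoding used when topoDist was built:
// 1 = directly bonded (1-2), 2 = a 1-3 pair, 3 = a 1-4 pair, and 0 for
// atoms with no recorded topological connection.  Illustrative use
// (hypothetical call site):
//   int d = getTopologicalDistance(atom1, atom2);
//   // d == 3 would flag a 1-4 pair for any scaling handled elsewhere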
431
432 void ForceMatrixDecomposition::zeroWorkArrays() {
433 pairwisePot = 0.0;
434 embeddingPot = 0.0;
435
436 #ifdef IS_MPI
437 if (storageLayout_ & DataStorage::dslForce) {
438 fill(atomRowData.force.begin(), atomRowData.force.end(), V3Zero);
439 fill(atomColData.force.begin(), atomColData.force.end(), V3Zero);
440 }
441
442 if (storageLayout_ & DataStorage::dslTorque) {
443 fill(atomRowData.torque.begin(), atomRowData.torque.end(), V3Zero);
444 fill(atomColData.torque.begin(), atomColData.torque.end(), V3Zero);
445 }
446
447 fill(pot_row.begin(), pot_row.end(),
448 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
449
450 fill(pot_col.begin(), pot_col.end(),
451 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
452
453 if (storageLayout_ & DataStorage::dslParticlePot) {
454 fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(),
455 0.0);
456 fill(atomColData.particlePot.begin(), atomColData.particlePot.end(),
457 0.0);
458 }
459
460 if (storageLayout_ & DataStorage::dslDensity) {
461 fill(atomRowData.density.begin(), atomRowData.density.end(), 0.0);
462 fill(atomColData.density.begin(), atomColData.density.end(), 0.0);
463 }
464
465 if (storageLayout_ & DataStorage::dslFunctional) {
466 fill(atomRowData.functional.begin(), atomRowData.functional.end(),
467 0.0);
468 fill(atomColData.functional.begin(), atomColData.functional.end(),
469 0.0);
470 }
471
472 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
473 fill(atomRowData.functionalDerivative.begin(),
474 atomRowData.functionalDerivative.end(), 0.0);
475 fill(atomColData.functionalDerivative.begin(),
476 atomColData.functionalDerivative.end(), 0.0);
477 }
478
479 if (storageLayout_ & DataStorage::dslSkippedCharge) {
480 fill(atomRowData.skippedCharge.begin(),
481 atomRowData.skippedCharge.end(), 0.0);
482 fill(atomColData.skippedCharge.begin(),
483 atomColData.skippedCharge.end(), 0.0);
484 }
485
486 #endif
487 // even in parallel, we need to zero out the local arrays:
488
489 if (storageLayout_ & DataStorage::dslParticlePot) {
490 fill(snap_->atomData.particlePot.begin(),
491 snap_->atomData.particlePot.end(), 0.0);
492 }
493
494 if (storageLayout_ & DataStorage::dslDensity) {
495 fill(snap_->atomData.density.begin(),
496 snap_->atomData.density.end(), 0.0);
497 }
498 if (storageLayout_ & DataStorage::dslFunctional) {
499 fill(snap_->atomData.functional.begin(),
500 snap_->atomData.functional.end(), 0.0);
501 }
502 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
503 fill(snap_->atomData.functionalDerivative.begin(),
504 snap_->atomData.functionalDerivative.end(), 0.0);
505 }
506 if (storageLayout_ & DataStorage::dslSkippedCharge) {
507 fill(snap_->atomData.skippedCharge.begin(),
508 snap_->atomData.skippedCharge.end(), 0.0);
509 }
510
511 }
512
513
514 void ForceMatrixDecomposition::distributeData() {
515 snap_ = sman_->getCurrentSnapshot();
516 storageLayout_ = sman_->getStorageLayout();
517 #ifdef IS_MPI
518
519 // gather up the atomic positions
520 AtomCommVectorRow->gather(snap_->atomData.position,
521 atomRowData.position);
522 AtomCommVectorColumn->gather(snap_->atomData.position,
523 atomColData.position);
524
525 // gather up the cutoff group positions
526 cgCommVectorRow->gather(snap_->cgData.position,
527 cgRowData.position);
528 cgCommVectorColumn->gather(snap_->cgData.position,
529 cgColData.position);
530
531 // if needed, gather the atomic rotation matrices
532 if (storageLayout_ & DataStorage::dslAmat) {
533 AtomCommMatrixRow->gather(snap_->atomData.aMat,
534 atomRowData.aMat);
535 AtomCommMatrixColumn->gather(snap_->atomData.aMat,
536 atomColData.aMat);
537 }
538
539 // if needed, gather the atomic electrostatic frames
540 if (storageLayout_ & DataStorage::dslElectroFrame) {
541 AtomCommMatrixRow->gather(snap_->atomData.electroFrame,
542 atomRowData.electroFrame);
543 AtomCommMatrixColumn->gather(snap_->atomData.electroFrame,
544 atomColData.electroFrame);
545 }
546
547 #endif
548 }
549
550 /* collects information obtained during the pre-pair loop onto local
551 * data structures.
552 */
553 void ForceMatrixDecomposition::collectIntermediateData() {
554 snap_ = sman_->getCurrentSnapshot();
555 storageLayout_ = sman_->getStorageLayout();
556 #ifdef IS_MPI
557
558 if (storageLayout_ & DataStorage::dslDensity) {
559
560 AtomCommRealRow->scatter(atomRowData.density,
561 snap_->atomData.density);
562
563 int n = snap_->atomData.density.size();
564 vector<RealType> rho_tmp(n, 0.0);
565 AtomCommRealColumn->scatter(atomColData.density, rho_tmp);
566 for (int i = 0; i < n; i++)
567 snap_->atomData.density[i] += rho_tmp[i];
568 }
569 #endif
570 }
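// Data flow in the block above (a sketch, assuming the Row/Column
// scatter performs a sum-reduction back to the owning processors):
// the row scatter deposits this processor's share of the densities
// accumulated along its processor row directly into
// snap_->atomData.density, the column scatter collects the share
// accumulated along its processor column into rho_tmp, and the loop
// adds the two partial sums so each local atom ends up with its
// total density.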
571
572 /*
573 * redistributes information obtained during the pre-pair loop out to
574 * row and column-indexed data structures
575 */
576 void ForceMatrixDecomposition::distributeIntermediateData() {
577 snap_ = sman_->getCurrentSnapshot();
578 storageLayout_ = sman_->getStorageLayout();
579 #ifdef IS_MPI
580 if (storageLayout_ & DataStorage::dslFunctional) {
581 AtomCommRealRow->gather(snap_->atomData.functional,
582 atomRowData.functional);
583 AtomCommRealColumn->gather(snap_->atomData.functional,
584 atomColData.functional);
585 }
586
587 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
588 AtomCommRealRow->gather(snap_->atomData.functionalDerivative,
589 atomRowData.functionalDerivative);
590 AtomCommRealColumn->gather(snap_->atomData.functionalDerivative,
591 atomColData.functionalDerivative);
592 }
593 #endif
594 }
595
596
597 void ForceMatrixDecomposition::collectData() {
598 snap_ = sman_->getCurrentSnapshot();
599 storageLayout_ = sman_->getStorageLayout();
600 #ifdef IS_MPI
601 int n = snap_->atomData.force.size();
602 vector<Vector3d> frc_tmp(n, V3Zero);
603
604 AtomCommVectorRow->scatter(atomRowData.force, frc_tmp);
605 for (int i = 0; i < n; i++) {
606 snap_->atomData.force[i] += frc_tmp[i];
607 frc_tmp[i] = 0.0;
608 }
609
610 AtomCommVectorColumn->scatter(atomColData.force, frc_tmp);
611 for (int i = 0; i < n; i++)
612 snap_->atomData.force[i] += frc_tmp[i];
613
614 if (storageLayout_ & DataStorage::dslTorque) {
615
616 int nt = snap_->atomData.torque.size();
617 vector<Vector3d> trq_tmp(nt, V3Zero);
618
619 AtomCommVectorRow->scatter(atomRowData.torque, trq_tmp);
620 for (int i = 0; i < nt; i++) {
621 snap_->atomData.torque[i] += trq_tmp[i];
622 trq_tmp[i] = 0.0;
623 }
624
625 AtomCommVectorColumn->scatter(atomColData.torque, trq_tmp);
626 for (int i = 0; i < nt; i++)
627 snap_->atomData.torque[i] += trq_tmp[i];
628 }
629
630 if (storageLayout_ & DataStorage::dslSkippedCharge) {
631
632 int ns = snap_->atomData.skippedCharge.size();
633 vector<RealType> skch_tmp(ns, 0.0);
634
635 AtomCommRealRow->scatter(atomRowData.skippedCharge, skch_tmp);
636 for (int i = 0; i < ns; i++) {
637 snap_->atomData.skippedCharge[i] += skch_tmp[i];
638 skch_tmp[i] = 0.0;
639 }
640
641 AtomCommRealColumn->scatter(atomColData.skippedCharge, skch_tmp);
642 for (int i = 0; i < ns; i++)
643 snap_->atomData.skippedCharge[i] += skch_tmp[i];
644 }
645
646 nLocal_ = snap_->getNumberOfAtoms();
647
648 vector<potVec> pot_temp(nLocal_,
649 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
650
651 // scatter/gather pot_row into the members of my column
652
653 AtomCommPotRow->scatter(pot_row, pot_temp);
654
655 for (int ii = 0; ii < pot_temp.size(); ii++ )
656 pairwisePot += pot_temp[ii];
657
658 fill(pot_temp.begin(), pot_temp.end(),
659 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
660
661 AtomCommPotColumn->scatter(pot_col, pot_temp);
662
663 for (int ii = 0; ii < pot_temp.size(); ii++ )
664 pairwisePot += pot_temp[ii];
665 #endif
666
667 }
668
669 int ForceMatrixDecomposition::getNAtomsInRow() {
670 #ifdef IS_MPI
671 return nAtomsInRow_;
672 #else
673 return nLocal_;
674 #endif
675 }
676
677 /**
678 * returns the list of atoms belonging to this group.
679 */
680 vector<int> ForceMatrixDecomposition::getAtomsInGroupRow(int cg1){
681 #ifdef IS_MPI
682 return groupListRow_[cg1];
683 #else
684 return groupList_[cg1];
685 #endif
686 }
687
688 vector<int> ForceMatrixDecomposition::getAtomsInGroupColumn(int cg2){
689 #ifdef IS_MPI
690 return groupListCol_[cg2];
691 #else
692 return groupList_[cg2];
693 #endif
694 }
695
696 Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1, int cg2){
697 Vector3d d;
698
699 #ifdef IS_MPI
700 d = cgColData.position[cg2] - cgRowData.position[cg1];
701 #else
702 d = snap_->cgData.position[cg2] - snap_->cgData.position[cg1];
703 #endif
704
705 snap_->wrapVector(d);
706 return d;
707 }
708
709
710 Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){
711
712 Vector3d d;
713
714 #ifdef IS_MPI
715 d = cgRowData.position[cg1] - atomRowData.position[atom1];
716 #else
717 d = snap_->cgData.position[cg1] - snap_->atomData.position[atom1];
718 #endif
719
720 snap_->wrapVector(d);
721 return d;
722 }
723
724 Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2, int cg2){
725 Vector3d d;
726
727 #ifdef IS_MPI
728 d = cgColData.position[cg2] - atomColData.position[atom2];
729 #else
730 d = snap_->cgData.position[cg2] - snap_->atomData.position[atom2];
731 #endif
732
733 snap_->wrapVector(d);
734 return d;
735 }
736
737 RealType ForceMatrixDecomposition::getMassFactorRow(int atom1) {
738 #ifdef IS_MPI
739 return massFactorsRow[atom1];
740 #else
741 return massFactors[atom1];
742 #endif
743 }
744
745 RealType ForceMatrixDecomposition::getMassFactorColumn(int atom2) {
746 #ifdef IS_MPI
747 return massFactorsCol[atom2];
748 #else
749 return massFactors[atom2];
750 #endif
751
752 }
753
754 Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1, int atom2){
755 Vector3d d;
756
757 #ifdef IS_MPI
758 d = atomColData.position[atom2] - atomRowData.position[atom1];
759 #else
760 d = snap_->atomData.position[atom2] - snap_->atomData.position[atom1];
761 #endif
762
763 snap_->wrapVector(d);
764 return d;
765 }
766
767 vector<int> ForceMatrixDecomposition::getExcludesForAtom(int atom1) {
768 return excludesForAtom[atom1];
769 }
770
771 /**
772 * We need to exclude some overcounted interactions that result from
773 * the parallel decomposition.
774 */
775 bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) {
776 int unique_id_1, unique_id_2;
777
778 #ifdef IS_MPI
779 // in MPI, we have to look up the unique IDs for each atom
780 unique_id_1 = AtomRowToGlobal[atom1];
781 unique_id_2 = AtomColToGlobal[atom2];
782
783 // this situation should only arise in MPI simulations
784 if (unique_id_1 == unique_id_2) return true;
785
786 // this prevents us from doing the pair on multiple processors
787 if (unique_id_1 < unique_id_2) {
788 if ((unique_id_1 + unique_id_2) % 2 == 0) return true;
789 } else {
790 if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
791 }
792 #endif
793 return false;
794 }
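/*
 * Worked example of the parity rule above (illustrative global IDs):
 * for the pair with global IDs (3, 7), the sum 10 is even, so the
 * processor that sees 3 as the row atom and 7 as the column atom skips
 * the pair (3 < 7, even sum), while the processor that sees (7, 3)
 * computes it (7 > 3, but the "odd sum" test fails).  For (3, 8) the
 * sum 11 is odd, so the roles reverse and the processor seeing 3 as the
 * row atom does the work.  Either way, each pair is computed on exactly
 * one processor.
 */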
795
796 /**
797 * We need to handle the interactions for atoms who are involved in
798 * the same rigid body as well as some short range interactions
799 * (bonds, bends, torsions) differently from other interactions.
800 * We'll still visit the pairwise routines, but with a flag that
801 * tells those routines to exclude the pair from direct long range
802 * interactions. Some indirect interactions (notably reaction
803 * field) must still be handled for these pairs.
804 */
805 bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) {
806 int unique_id_2;
807
808 #ifdef IS_MPI
809 // in MPI, we have to look up the unique ID for the column atom.
810 unique_id_2 = AtomColToGlobal[atom2];
811 #else
812 // in the normal loop, the atom numbers are unique
813 unique_id_2 = atom2;
814 #endif
815
816 for (vector<int>::iterator i = excludesForAtom[atom1].begin();
817 i != excludesForAtom[atom1].end(); ++i) {
818 if ( (*i) == unique_id_2 ) return true;
819 }
820
821 return false;
822 }
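// Note the division of labor implied by the comments above:
// skipAtomPair() removes parallel double-counting entirely, while
// excludeAtomPair() only marks a pair as excluded (idat.excluded in
// fillInteractionData), so the pairwise routines can still apply
// indirect contributions such as reaction field to bonded or
// same-rigid-body pairs.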
823
824
825 void ForceMatrixDecomposition::addForceToAtomRow(int atom1, Vector3d fg){
826 #ifdef IS_MPI
827 atomRowData.force[atom1] += fg;
828 #else
829 snap_->atomData.force[atom1] += fg;
830 #endif
831 }
832
833 void ForceMatrixDecomposition::addForceToAtomColumn(int atom2, Vector3d fg){
834 #ifdef IS_MPI
835 atomColData.force[atom2] += fg;
836 #else
837 snap_->atomData.force[atom2] += fg;
838 #endif
839 }
840
841 // filling interaction blocks with pointers
842 void ForceMatrixDecomposition::fillInteractionData(InteractionData &idat,
843 int atom1, int atom2) {
844
845 idat.excluded = excludeAtomPair(atom1, atom2);
846
847 #ifdef IS_MPI
848 idat.atypes = make_pair( atypesRow[atom1], atypesCol[atom2]);
849 //idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]),
850 // ff_->getAtomType(identsCol[atom2]) );
851
852 if (storageLayout_ & DataStorage::dslAmat) {
853 idat.A1 = &(atomRowData.aMat[atom1]);
854 idat.A2 = &(atomColData.aMat[atom2]);
855 }
856
857 if (storageLayout_ & DataStorage::dslElectroFrame) {
858 idat.eFrame1 = &(atomRowData.electroFrame[atom1]);
859 idat.eFrame2 = &(atomColData.electroFrame[atom2]);
860 }
861
862 if (storageLayout_ & DataStorage::dslTorque) {
863 idat.t1 = &(atomRowData.torque[atom1]);
864 idat.t2 = &(atomColData.torque[atom2]);
865 }
866
867 if (storageLayout_ & DataStorage::dslDensity) {
868 idat.rho1 = &(atomRowData.density[atom1]);
869 idat.rho2 = &(atomColData.density[atom2]);
870 }
871
872 if (storageLayout_ & DataStorage::dslFunctional) {
873 idat.frho1 = &(atomRowData.functional[atom1]);
874 idat.frho2 = &(atomColData.functional[atom2]);
875 }
876
877 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
878 idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
879 idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
880 }
881
882 if (storageLayout_ & DataStorage::dslParticlePot) {
883 idat.particlePot1 = &(atomRowData.particlePot[atom1]);
884 idat.particlePot2 = &(atomColData.particlePot[atom2]);
885 }
886
887 if (storageLayout_ & DataStorage::dslSkippedCharge) {
888 idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
889 idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
890 }
891
892 #else
893
894 idat.atypes = make_pair( atypesLocal[atom1], atypesLocal[atom2]);
895 //idat.atypes = make_pair( ff_->getAtomType(idents[atom1]),
896 // ff_->getAtomType(idents[atom2]) );
897
898 if (storageLayout_ & DataStorage::dslAmat) {
899 idat.A1 = &(snap_->atomData.aMat[atom1]);
900 idat.A2 = &(snap_->atomData.aMat[atom2]);
901 }
902
903 if (storageLayout_ & DataStorage::dslElectroFrame) {
904 idat.eFrame1 = &(snap_->atomData.electroFrame[atom1]);
905 idat.eFrame2 = &(snap_->atomData.electroFrame[atom2]);
906 }
907
908 if (storageLayout_ & DataStorage::dslTorque) {
909 idat.t1 = &(snap_->atomData.torque[atom1]);
910 idat.t2 = &(snap_->atomData.torque[atom2]);
911 }
912
913 if (storageLayout_ & DataStorage::dslDensity) {
914 idat.rho1 = &(snap_->atomData.density[atom1]);
915 idat.rho2 = &(snap_->atomData.density[atom2]);
916 }
917
918 if (storageLayout_ & DataStorage::dslFunctional) {
919 idat.frho1 = &(snap_->atomData.functional[atom1]);
920 idat.frho2 = &(snap_->atomData.functional[atom2]);
921 }
922
923 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
924 idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
925 idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
926 }
927
928 if (storageLayout_ & DataStorage::dslParticlePot) {
929 idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
930 idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
931 }
932
933 if (storageLayout_ & DataStorage::dslSkippedCharge) {
934 idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
935 idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
936 }
937 #endif
938 }
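// The InteractionData block filled above holds pointers into the
// row/column (or local) storage rather than copies, so the nonbonded
// routines read and update atomic data in place; only the per-pair
// potential and force are pushed back explicitly in
// unpackInteractionData() below.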
939
940
941 void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) {
942 #ifdef IS_MPI
943 pot_row[atom1] += 0.5 * *(idat.pot);
944 pot_col[atom2] += 0.5 * *(idat.pot);
945
946 atomRowData.force[atom1] += *(idat.f1);
947 atomColData.force[atom2] -= *(idat.f1);
948 #else
949 pairwisePot += *(idat.pot);
950
951 snap_->atomData.force[atom1] += *(idat.f1);
952 snap_->atomData.force[atom2] -= *(idat.f1);
953 #endif
954
955 }
956
957 /*
958 * buildNeighborList
959 *
960 * first element of pair is row-indexed CutoffGroup
961 * second element of pair is column-indexed CutoffGroup
962 */
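/*
 * A short numerical sketch of the cell decomposition below
 * (illustrative values, not from a real run): with a cubic box of edge
 * 40.0 angstroms, largestRcut_ = 10.0 and skinThickness_ = 2.0, the
 * list radius rList_ is 12.0 and each box direction is divided into
 * int(40.0 / 12.0) = 3 cells, the minimum needed for the cell-offset
 * search; smaller boxes fall back to the all-pairs branch.  A scaled
 * coordinate of 0.73 in a direction with 3 cells lands in cell
 * int(3 * 0.73) = 2.
 */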
963 vector<pair<int, int> > ForceMatrixDecomposition::buildNeighborList() {
964
965 vector<pair<int, int> > neighborList;
966 groupCutoffs cuts;
967 bool doAllPairs = false;
968
969 #ifdef IS_MPI
970 cellListRow_.clear();
971 cellListCol_.clear();
972 #else
973 cellList_.clear();
974 #endif
975
976 RealType rList_ = (largestRcut_ + skinThickness_);
977 RealType rl2 = rList_ * rList_;
978 Snapshot* snap_ = sman_->getCurrentSnapshot();
979 Mat3x3d Hmat = snap_->getHmat();
980 Vector3d Hx = Hmat.getColumn(0);
981 Vector3d Hy = Hmat.getColumn(1);
982 Vector3d Hz = Hmat.getColumn(2);
983
984 nCells_.x() = (int) ( Hx.length() / rList_ ); // cast the quotient, not the box length
985 nCells_.y() = (int) ( Hy.length() / rList_ );
986 nCells_.z() = (int) ( Hz.length() / rList_ );
987
988 // handle small boxes where the cell offsets can end up repeating cells
989
990 if (nCells_.x() < 3) doAllPairs = true;
991 if (nCells_.y() < 3) doAllPairs = true;
992 if (nCells_.z() < 3) doAllPairs = true;
993
994 Mat3x3d invHmat = snap_->getInvHmat();
995 Vector3d rs, scaled, dr;
996 Vector3i whichCell;
997 int cellIndex;
998 int nCtot = nCells_.x() * nCells_.y() * nCells_.z();
999
1000 #ifdef IS_MPI
1001 cellListRow_.resize(nCtot);
1002 cellListCol_.resize(nCtot);
1003 #else
1004 cellList_.resize(nCtot);
1005 #endif
1006
1007 if (!doAllPairs) {
1008 #ifdef IS_MPI
1009
1010 for (int i = 0; i < nGroupsInRow_; i++) {
1011 rs = cgRowData.position[i];
1012
1013 // scaled positions relative to the box vectors
1014 scaled = invHmat * rs;
1015
1016 // wrap the vector back into the unit box by subtracting integer box
1017 // numbers
1018 for (int j = 0; j < 3; j++) {
1019 scaled[j] -= roundMe(scaled[j]);
1020 scaled[j] += 0.5;
1021 }
1022
1023 // find xyz-indices of cell that cutoffGroup is in.
1024 whichCell.x() = nCells_.x() * scaled.x();
1025 whichCell.y() = nCells_.y() * scaled.y();
1026 whichCell.z() = nCells_.z() * scaled.z();
1027
1028 // find single index of this cell:
1029 cellIndex = Vlinear(whichCell, nCells_);
1030
1031 // add this cutoff group to the list of groups in this cell;
1032 cellListRow_[cellIndex].push_back(i);
1033 }
1034
1035 for (int i = 0; i < nGroupsInCol_; i++) {
1036 rs = cgColData.position[i];
1037
1038 // scaled positions relative to the box vectors
1039 scaled = invHmat * rs;
1040
1041 // wrap the vector back into the unit box by subtracting integer box
1042 // numbers
1043 for (int j = 0; j < 3; j++) {
1044 scaled[j] -= roundMe(scaled[j]);
1045 scaled[j] += 0.5;
1046 }
1047
1048 // find xyz-indices of cell that cutoffGroup is in.
1049 whichCell.x() = nCells_.x() * scaled.x();
1050 whichCell.y() = nCells_.y() * scaled.y();
1051 whichCell.z() = nCells_.z() * scaled.z();
1052
1053 // find single index of this cell:
1054 cellIndex = Vlinear(whichCell, nCells_);
1055
1056 // add this cutoff group to the list of groups in this cell;
1057 cellListCol_[cellIndex].push_back(i);
1058 }
1059 #else
1060 for (int i = 0; i < nGroups_; i++) {
1061 rs = snap_->cgData.position[i];
1062
1063 // scaled positions relative to the box vectors
1064 scaled = invHmat * rs;
1065
1066 // wrap the vector back into the unit box by subtracting integer box
1067 // numbers
1068 for (int j = 0; j < 3; j++) {
1069 scaled[j] -= roundMe(scaled[j]);
1070 scaled[j] += 0.5;
1071 }
1072
1073 // find xyz-indices of cell that cutoffGroup is in.
1074 whichCell.x() = nCells_.x() * scaled.x();
1075 whichCell.y() = nCells_.y() * scaled.y();
1076 whichCell.z() = nCells_.z() * scaled.z();
1077
1078 // find single index of this cell:
1079 cellIndex = Vlinear(whichCell, nCells_);
1080
1081 // add this cutoff group to the list of groups in this cell;
1082 cellList_[cellIndex].push_back(i);
1083 }
1084 #endif
1085
1086 for (int m1z = 0; m1z < nCells_.z(); m1z++) {
1087 for (int m1y = 0; m1y < nCells_.y(); m1y++) {
1088 for (int m1x = 0; m1x < nCells_.x(); m1x++) {
1089 Vector3i m1v(m1x, m1y, m1z);
1090 int m1 = Vlinear(m1v, nCells_);
1091
1092 for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1093 os != cellOffsets_.end(); ++os) {
1094
1095 Vector3i m2v = m1v + (*os);
1096
1097 if (m2v.x() >= nCells_.x()) {
1098 m2v.x() = 0;
1099 } else if (m2v.x() < 0) {
1100 m2v.x() = nCells_.x() - 1;
1101 }
1102
1103 if (m2v.y() >= nCells_.y()) {
1104 m2v.y() = 0;
1105 } else if (m2v.y() < 0) {
1106 m2v.y() = nCells_.y() - 1;
1107 }
1108
1109 if (m2v.z() >= nCells_.z()) {
1110 m2v.z() = 0;
1111 } else if (m2v.z() < 0) {
1112 m2v.z() = nCells_.z() - 1;
1113 }
1114
1115 int m2 = Vlinear (m2v, nCells_);
1116
1117 #ifdef IS_MPI
1118 for (vector<int>::iterator j1 = cellListRow_[m1].begin();
1119 j1 != cellListRow_[m1].end(); ++j1) {
1120 for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1121 j2 != cellListCol_[m2].end(); ++j2) {
1122
1123 // Always do this if we're in different cells or if
1124 // we're in the same cell and the global index of the
1125 // j2 cutoff group is less than the j1 cutoff group
1126
1127 if (m2 != m1 || cgColToGlobal[(*j2)] < cgRowToGlobal[(*j1)]) {
1128 dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)];
1129 snap_->wrapVector(dr);
1130 cuts = getGroupCutoffs( (*j1), (*j2) );
1131 if (dr.lengthSquare() < cuts.third) {
1132 neighborList.push_back(make_pair((*j1), (*j2)));
1133 }
1134 }
1135 }
1136 }
1137 #else
1138
1139 for (vector<int>::iterator j1 = cellList_[m1].begin();
1140 j1 != cellList_[m1].end(); ++j1) {
1141 for (vector<int>::iterator j2 = cellList_[m2].begin();
1142 j2 != cellList_[m2].end(); ++j2) {
1143
1144 // Always do this if we're in different cells or if
1145 // we're in the same cell and the global index of the
1146 // j2 cutoff group is less than the j1 cutoff group
1147
1148 if (m2 != m1 || (*j2) < (*j1)) {
1149 dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
1150 snap_->wrapVector(dr);
1151 cuts = getGroupCutoffs( (*j1), (*j2) );
1152 if (dr.lengthSquare() < cuts.third) {
1153 neighborList.push_back(make_pair((*j1), (*j2)));
1154 }
1155 }
1156 }
1157 }
1158 #endif
1159 }
1160 }
1161 }
1162 }
1163 } else {
1164 // branch to do all cutoff group pairs
1165 #ifdef IS_MPI
1166 for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1167 for (int j2 = 0; j2 < nGroupsInCol_; j2++) {
1168 dr = cgColData.position[j2] - cgRowData.position[j1];
1169 snap_->wrapVector(dr);
1170 cuts = getGroupCutoffs( j1, j2 );
1171 if (dr.lengthSquare() < cuts.third) {
1172 neighborList.push_back(make_pair(j1, j2));
1173 }
1174 }
1175 }
1176 #else
1177 for (int j1 = 0; j1 < nGroups_ - 1; j1++) {
1178 for (int j2 = j1 + 1; j2 < nGroups_; j2++) {
1179 dr = snap_->cgData.position[j2] - snap_->cgData.position[j1];
1180 snap_->wrapVector(dr);
1181 cuts = getGroupCutoffs( j1, j2 );
1182 if (dr.lengthSquare() < cuts.third) {
1183 neighborList.push_back(make_pair(j1, j2));
1184 }
1185 }
1186 }
1187 #endif
1188 }
1189
1190 // save the local cutoff group positions for the check that is
1191 // done on each loop:
1192 saved_CG_positions_.clear();
1193 for (int i = 0; i < nGroups_; i++)
1194 saved_CG_positions_.push_back(snap_->cgData.position[i]);
1195
1196 return neighborList;
1197 }
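// saved_CG_positions_ gives later displacement checks a reference
// point: the usual Verlet-list criterion (presumably applied by the
// caller, which is not shown in this file) rebuilds the neighbor list
// once any cutoff group has moved more than about half of
// skinThickness_ from the position saved above.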
1198 } //end namespace OpenMD