root/OpenMD/branches/development/src/parallel/ForceMatrixDecomposition.cpp
Revision: 1584
Committed: Fri Jun 17 20:16:35 2011 UTC by gezelter
File size: 37236 bytes
Log Message:
bug fixes.

1 /*
2 * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
3 *
4 * The University of Notre Dame grants you ("Licensee") a
5 * non-exclusive, royalty free, license to use, modify and
6 * redistribute this software in source and binary code form, provided
7 * that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the
15 * distribution.
16 *
17 * This software is provided "AS IS," without a warranty of any
18 * kind. All express or implied conditions, representations and
19 * warranties, including any implied warranty of merchantability,
20 * fitness for a particular purpose or non-infringement, are hereby
21 * excluded. The University of Notre Dame and its licensors shall not
22 * be liable for any damages suffered by licensee as a result of
23 * using, modifying or distributing the software or its
24 * derivatives. In no event will the University of Notre Dame or its
25 * licensors be liable for any lost revenue, profit or data, or for
26 * direct, indirect, special, consequential, incidental or punitive
27 * damages, however caused and regardless of the theory of liability,
28 * arising out of the use of or inability to use software, even if the
29 * University of Notre Dame has been advised of the possibility of
30 * such damages.
31 *
32 * SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
33 * research, please cite the appropriate papers when you publish your
34 * work. Good starting points are:
35 *
36 * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
37 * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
38 * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
39 * [4] Vardeman & Gezelter, in progress (2009).
40 */
41 #include "parallel/ForceMatrixDecomposition.hpp"
42 #include "math/SquareMatrix3.hpp"
43 #include "nonbonded/NonBondedInteraction.hpp"
44 #include "brains/SnapshotManager.hpp"
45 #include "brains/PairList.hpp"
46
47 using namespace std;
48 namespace OpenMD {
49
50 /**
51 * distributeInitialData is essentially a copy of the older Fortran
52 * SimulationSetup
53 */
54
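/*
 * A sketch of the layout being set up here (not from the original source,
 * but consistent with the code below): the processors are arranged as a
 * roughly square grid, and each one owns a block of the pairwise
 * interaction matrix.  The Row communicators gather everything a
 * processor needs about the atoms spanning its block's rows, and the
 * Column communicators do the same for its columns, so that, in effect,
 *
 *   nAtomsInRow_ = total number of atoms held by the processors in my row
 *   nAtomsInCol_ = total number of atoms held by the processors in my column
 *
 * and atomRowData / atomColData become row- and column-indexed copies of
 * the per-atom arrays stored in the local Snapshot.
 */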
55 void ForceMatrixDecomposition::distributeInitialData() {
56 snap_ = sman_->getCurrentSnapshot();
57 storageLayout_ = sman_->getStorageLayout();
58 ff_ = info_->getForceField();
59 nLocal_ = snap_->getNumberOfAtoms();
60
61 nGroups_ = info_->getNLocalCutoffGroups();
62 cerr << "in dId, nGroups = " << nGroups_ << "\n";
63 // gather the information for atomtype IDs (atids):
64 idents = info_->getIdentArray();
65 AtomLocalToGlobal = info_->getGlobalAtomIndices();
66 cgLocalToGlobal = info_->getGlobalGroupIndices();
67 vector<int> globalGroupMembership = info_->getGlobalGroupMembership();
68 massFactors = info_->getMassFactors();
69
70 PairList excludes = info_->getExcludedInteractions();
71 PairList oneTwo = info_->getOneTwoInteractions();
72 PairList oneThree = info_->getOneThreeInteractions();
73 PairList oneFour = info_->getOneFourInteractions();
74
75 #ifdef IS_MPI
76
77 AtomCommIntRow = new Communicator<Row,int>(nLocal_);
78 AtomCommRealRow = new Communicator<Row,RealType>(nLocal_);
79 AtomCommVectorRow = new Communicator<Row,Vector3d>(nLocal_);
80 AtomCommMatrixRow = new Communicator<Row,Mat3x3d>(nLocal_);
81 AtomCommPotRow = new Communicator<Row,potVec>(nLocal_);
82
83 AtomCommIntColumn = new Communicator<Column,int>(nLocal_);
84 AtomCommRealColumn = new Communicator<Column,RealType>(nLocal_);
85 AtomCommVectorColumn = new Communicator<Column,Vector3d>(nLocal_);
86 AtomCommMatrixColumn = new Communicator<Column,Mat3x3d>(nLocal_);
87 AtomCommPotColumn = new Communicator<Column,potVec>(nLocal_);
88
89 cgCommIntRow = new Communicator<Row,int>(nGroups_);
90 cgCommVectorRow = new Communicator<Row,Vector3d>(nGroups_);
91 cgCommIntColumn = new Communicator<Column,int>(nGroups_);
92 cgCommVectorColumn = new Communicator<Column,Vector3d>(nGroups_);
93
94 nAtomsInRow_ = AtomCommIntRow->getSize();
95 nAtomsInCol_ = AtomCommIntColumn->getSize();
96 nGroupsInRow_ = cgCommIntRow->getSize();
97 nGroupsInCol_ = cgCommIntColumn->getSize();
98
99 // Modify the data storage objects with the correct layouts and sizes:
100 atomRowData.resize(nAtomsInRow_);
101 atomRowData.setStorageLayout(storageLayout_);
102 atomColData.resize(nAtomsInCol_);
103 atomColData.setStorageLayout(storageLayout_);
104 cgRowData.resize(nGroupsInRow_);
105 cgRowData.setStorageLayout(DataStorage::dslPosition);
106 cgColData.resize(nGroupsInCol_);
107 cgColData.setStorageLayout(DataStorage::dslPosition);
108
109 identsRow.resize(nAtomsInRow_);
110 identsCol.resize(nAtomsInCol_);
111
112 AtomCommIntRow->gather(idents, identsRow);
113 AtomCommIntColumn->gather(idents, identsCol);
114
115 AtomCommIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
116 AtomCommIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);
117
118 cgCommIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
119 cgCommIntColumn->gather(cgLocalToGlobal, cgColToGlobal);
120
121 AtomCommRealRow->gather(massFactors, massFactorsRow);
122 AtomCommRealColumn->gather(massFactors, massFactorsCol);
123
124 groupListRow_.clear();
125 groupListRow_.resize(nGroupsInRow_);
126 for (int i = 0; i < nGroupsInRow_; i++) {
127 int gid = cgRowToGlobal[i];
128 for (int j = 0; j < nAtomsInRow_; j++) {
129 int aid = AtomRowToGlobal[j];
130 if (globalGroupMembership[aid] == gid)
131 groupListRow_[i].push_back(j);
132 }
133 }
134
135 groupListCol_.clear();
136 groupListCol_.resize(nGroupsInCol_);
137 for (int i = 0; i < nGroupsInCol_; i++) {
138 int gid = cgColToGlobal[i];
139 for (int j = 0; j < nAtomsInCol_; j++) {
140 int aid = AtomColToGlobal[j];
141 if (globalGroupMembership[aid] == gid)
142 groupListCol_[i].push_back(j);
143 }
144 }
145
146 skipsForAtom.clear();
147 skipsForAtom.resize(nAtomsInRow_);
148 toposForAtom.clear();
149 toposForAtom.resize(nAtomsInRow_);
150 topoDist.clear();
151 topoDist.resize(nAtomsInRow_);
152 for (int i = 0; i < nAtomsInRow_; i++) {
153 int iglob = AtomRowToGlobal[i];
154
155 for (int j = 0; j < nAtomsInCol_; j++) {
156 int jglob = AtomColToGlobal[j];
157
158 if (excludes.hasPair(iglob, jglob))
159 skipsForAtom[i].push_back(j);
160
161 if (oneTwo.hasPair(iglob, jglob)) {
162 toposForAtom[i].push_back(j);
163 topoDist[i].push_back(1);
164 } else {
165 if (oneThree.hasPair(iglob, jglob)) {
166 toposForAtom[i].push_back(j);
167 topoDist[i].push_back(2);
168 } else {
169 if (oneFour.hasPair(iglob, jglob)) {
170 toposForAtom[i].push_back(j);
171 topoDist[i].push_back(3);
172 }
173 }
174 }
175 }
176 }
177
178 #endif
179
180 groupList_.clear();
181 groupList_.resize(nGroups_);
182 for (int i = 0; i < nGroups_; i++) {
183 int gid = cgLocalToGlobal[i];
184 for (int j = 0; j < nLocal_; j++) {
185 int aid = AtomLocalToGlobal[j];
186 if (globalGroupMembership[aid] == gid) {
187 groupList_[i].push_back(j);
188 }
189 }
190 }
191
192 skipsForAtom.clear();
193 skipsForAtom.resize(nLocal_);
194 toposForAtom.clear();
195 toposForAtom.resize(nLocal_);
196 topoDist.clear();
197 topoDist.resize(nLocal_);
198
199 for (int i = 0; i < nLocal_; i++) {
200 int iglob = AtomLocalToGlobal[i];
201
202 for (int j = 0; j < nLocal_; j++) {
203 int jglob = AtomLocalToGlobal[j];
204
205 if (excludes.hasPair(iglob, jglob))
206 skipsForAtom[i].push_back(j);
207
208 if (oneTwo.hasPair(iglob, jglob)) {
209 toposForAtom[i].push_back(j);
210 topoDist[i].push_back(1);
211 } else {
212 if (oneThree.hasPair(iglob, jglob)) {
213 toposForAtom[i].push_back(j);
214 topoDist[i].push_back(2);
215 } else {
216 if (oneFour.hasPair(iglob, jglob)) {
217 toposForAtom[i].push_back(j);
218 topoDist[i].push_back(3);
219 }
220 }
221 }
222 }
223 }
224
225 createGtypeCutoffMap();
226 }
227
228 void ForceMatrixDecomposition::createGtypeCutoffMap() {
229
230 RealType tol = 1e-6;
231 RealType rc;
232 int atid;
233 set<AtomType*> atypes = info_->getSimulatedAtomTypes();
234 vector<RealType> atypeCutoff;
235 atypeCutoff.resize( atypes.size() );
236
237 for (set<AtomType*>::iterator at = atypes.begin();
238 at != atypes.end(); ++at){
239 atid = (*at)->getIdent();
240
241 if (userChoseCutoff_)
242 atypeCutoff[atid] = userCutoff_;
243 else
244 atypeCutoff[atid] = interactionMan_->getSuggestedCutoffRadius(*at);
245 }
246
247 vector<RealType> gTypeCutoffs;
248
249 // first we do a single loop over the cutoff groups to find the
250 // largest cutoff for any atype present in each group.
251 #ifdef IS_MPI
252 vector<RealType> groupCutoffRow(nGroupsInRow_, 0.0);
253 groupRowToGtype.resize(nGroupsInRow_);
254 for (int cg1 = 0; cg1 < nGroupsInRow_; cg1++) {
255 vector<int> atomListRow = getAtomsInGroupRow(cg1);
256 for (vector<int>::iterator ia = atomListRow.begin();
257 ia != atomListRow.end(); ++ia) {
258 int atom1 = (*ia);
259 atid = identsRow[atom1];
260 if (atypeCutoff[atid] > groupCutoffRow[cg1]) {
261 groupCutoffRow[cg1] = atypeCutoff[atid];
262 }
263 }
264
265 bool gTypeFound = false;
266 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
267 if (abs(groupCutoffRow[cg1] - gTypeCutoffs[gt]) < tol) {
268 groupRowToGtype[cg1] = gt;
269 gTypeFound = true;
270 }
271 }
272 if (!gTypeFound) {
273 gTypeCutoffs.push_back( groupCutoffRow[cg1] );
274 groupRowToGtype[cg1] = gTypeCutoffs.size() - 1;
275 }
276
277 }
278 vector<RealType> groupCutoffCol(nGroupsInCol_, 0.0);
279 groupColToGtype.resize(nGroupsInCol_);
280 for (int cg2 = 0; cg2 < nGroupsInCol_; cg2++) {
281 vector<int> atomListCol = getAtomsInGroupColumn(cg2);
282 for (vector<int>::iterator jb = atomListCol.begin();
283 jb != atomListCol.end(); ++jb) {
284 int atom2 = (*jb);
285 atid = identsCol[atom2];
286 if (atypeCutoff[atid] > groupCutoffCol[cg2]) {
287 groupCutoffCol[cg2] = atypeCutoff[atid];
288 }
289 }
290 bool gTypeFound = false;
291 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
292 if (abs(groupCutoffCol[cg2] - gTypeCutoffs[gt]) < tol) {
293 groupColToGtype[cg2] = gt;
294 gTypeFound = true;
295 }
296 }
297 if (!gTypeFound) {
298 gTypeCutoffs.push_back( groupCutoffCol[cg2] );
299 groupColToGtype[cg2] = gTypeCutoffs.size() - 1;
300 }
301 }
302 #else
303
304 vector<RealType> groupCutoff(nGroups_, 0.0);
305 groupToGtype.resize(nGroups_);
306
307 cerr << "nGroups = " << nGroups_ << "\n";
308 for (int cg1 = 0; cg1 < nGroups_; cg1++) {
309
310 groupCutoff[cg1] = 0.0;
311 vector<int> atomList = getAtomsInGroupRow(cg1);
312
313 for (vector<int>::iterator ia = atomList.begin();
314 ia != atomList.end(); ++ia) {
315 int atom1 = (*ia);
316 atid = idents[atom1];
317 if (atypeCutoff[atid] > groupCutoff[cg1]) {
318 groupCutoff[cg1] = atypeCutoff[atid];
319 }
320 }
321
322 bool gTypeFound = false;
323 for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
324 if (abs(groupCutoff[cg1] - gTypeCutoffs[gt]) < tol) {
325 groupToGtype[cg1] = gt;
326 gTypeFound = true;
327 }
328 }
329 if (!gTypeFound) {
330 gTypeCutoffs.push_back( groupCutoff[cg1] );
331 groupToGtype[cg1] = gTypeCutoffs.size() - 1;
332 }
333 }
334 #endif
335
336 cerr << "gTypeCutoffs.size() = " << gTypeCutoffs.size() << "\n";
337 // Now we find the maximum group cutoff value present in the simulation
338
339 RealType groupMax = *max_element(gTypeCutoffs.begin(), gTypeCutoffs.end());
340
341 #ifdef IS_MPI
342 MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &groupMax, 1, MPI::REALTYPE, MPI::MAX);
343 #endif
344
345 RealType tradRcut = groupMax;
346
347 for (int i = 0; i < gTypeCutoffs.size(); i++) {
348 for (int j = 0; j < gTypeCutoffs.size(); j++) {
349 RealType thisRcut;
350 switch(cutoffPolicy_) {
351 case TRADITIONAL:
352 thisRcut = tradRcut;
353 break;
354 case MIX:
355 thisRcut = 0.5 * (gTypeCutoffs[i] + gTypeCutoffs[j]);
356 break;
357 case MAX:
358 thisRcut = max(gTypeCutoffs[i], gTypeCutoffs[j]);
359 break;
360 default:
361 sprintf(painCave.errMsg,
362 "ForceMatrixDecomposition::createGtypeCutoffMap "
363 "hit an unknown cutoff policy!\n");
364 painCave.severity = OPENMD_ERROR;
365 painCave.isFatal = 1;
366 simError();
367 break;
368 }
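// A quick worked example of the three policies (sketch): with two group
// types whose cutoffs are 9.0 and 12.0,
//   TRADITIONAL : every pair uses tradRcut, the global maximum (12.0)
//   MIX         : the (9.0, 12.0) pair uses 0.5 * (9.0 + 12.0) = 10.5
//   MAX         : the (9.0, 12.0) pair uses max(9.0, 12.0)     = 12.0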
369
370 pair<int,int> key = make_pair(i,j);
371 gTypeCutoffMap[key].first = thisRcut;
372
373 if (thisRcut > largestRcut_) largestRcut_ = thisRcut;
374
375 gTypeCutoffMap[key].second = thisRcut*thisRcut;
376
377 gTypeCutoffMap[key].third = pow(thisRcut + skinThickness_, 2);
378
379 // sanity check
380
381 if (userChoseCutoff_) {
382 if (abs(gTypeCutoffMap[key].first - userCutoff_) > 0.0001) {
383 sprintf(painCave.errMsg,
384 "ForceMatrixDecomposition::createGtypeCutoffMap "
385 "user-specified rCut (%lf) does not match computed group Cutoff\n", userCutoff_);
386 painCave.severity = OPENMD_ERROR;
387 painCave.isFatal = 1;
388 simError();
389 }
390 }
391 }
392 }
393 }
394
395
396 groupCutoffs ForceMatrixDecomposition::getGroupCutoffs(int cg1, int cg2) {
397 int i, j;
398 #ifdef IS_MPI
399 i = groupRowToGtype[cg1];
400 j = groupColToGtype[cg2];
401 #else
402 i = groupToGtype[cg1];
403 j = groupToGtype[cg2];
404 #endif
405 return gTypeCutoffMap[make_pair(i,j)];
406 }
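/*
 * The three members of the returned groupCutoffs triple are filled in by
 * createGtypeCutoffMap above: .first is the group-group cutoff radius,
 * .second is its square, and .third is (rcut + skinThickness_)^2, the
 * squared neighbor-list radius.  A caller in the pair loop might use it
 * roughly like this (hypothetical sketch, not code from this file):
 *
 *   groupCutoffs cuts = getGroupCutoffs(cg1, cg2);
 *   if (d.lengthSquare() < cuts.second) {
 *     // within the force cutoff
 *   }
 *
 * while buildNeighborList below tests against cuts.third.
 */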
407
408 int ForceMatrixDecomposition::getTopologicalDistance(int atom1, int atom2) {
409 for (int j = 0; j < toposForAtom[atom1].size(); j++) {
410 if (toposForAtom[atom1][j] == atom2)
411 return topoDist[atom1][j];
412 }
413 return 0;
414 }
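/*
 * The return value encodes separation along the bonded network, matching
 * the values pushed into topoDist in distributeInitialData: 1 for a 1-2
 * (bonded) pair, 2 for a 1-3 (bend) pair, 3 for a 1-4 (torsion) pair, and
 * 0 for anything more distant.  Callers can map this onto the usual
 * 1-2 / 1-3 / 1-4 scale factors, e.g. (hypothetical sketch):
 *
 *   switch (getTopologicalDistance(atom1, atom2)) {
 *   case 3:  scale = oneFourScaleFactor; break;
 *   case 0:  scale = 1.0;                break;
 *   default: scale = 0.0;                break;  // 1-2 and 1-3 pairs
 *   }
 */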
415
416 void ForceMatrixDecomposition::zeroWorkArrays() {
417 pairwisePot = 0.0;
418 embeddingPot = 0.0;
419
420 #ifdef IS_MPI
421 if (storageLayout_ & DataStorage::dslForce) {
422 fill(atomRowData.force.begin(), atomRowData.force.end(), V3Zero);
423 fill(atomColData.force.begin(), atomColData.force.end(), V3Zero);
424 }
425
426 if (storageLayout_ & DataStorage::dslTorque) {
427 fill(atomRowData.torque.begin(), atomRowData.torque.end(), V3Zero);
428 fill(atomColData.torque.begin(), atomColData.torque.end(), V3Zero);
429 }
430
431 fill(pot_row.begin(), pot_row.end(),
432 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
433
434 fill(pot_col.begin(), pot_col.end(),
435 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
436
437 if (storageLayout_ & DataStorage::dslParticlePot) {
438 fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(), 0.0);
439 fill(atomColData.particlePot.begin(), atomColData.particlePot.end(), 0.0);
440 }
441
442 if (storageLayout_ & DataStorage::dslDensity) {
443 fill(atomRowData.density.begin(), atomRowData.density.end(), 0.0);
444 fill(atomColData.density.begin(), atomColData.density.end(), 0.0);
445 }
446
447 if (storageLayout_ & DataStorage::dslFunctional) {
448 fill(atomRowData.functional.begin(), atomRowData.functional.end(), 0.0);
449 fill(atomColData.functional.begin(), atomColData.functional.end(), 0.0);
450 }
451
452 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
453 fill(atomRowData.functionalDerivative.begin(),
454 atomRowData.functionalDerivative.end(), 0.0);
455 fill(atomColData.functionalDerivative.begin(),
456 atomColData.functionalDerivative.end(), 0.0);
457 }
458
459 #else
460
461 if (storageLayout_ & DataStorage::dslParticlePot) {
462 fill(snap_->atomData.particlePot.begin(),
463 snap_->atomData.particlePot.end(), 0.0);
464 }
465
466 if (storageLayout_ & DataStorage::dslDensity) {
467 fill(snap_->atomData.density.begin(),
468 snap_->atomData.density.end(), 0.0);
469 }
470 if (storageLayout_ & DataStorage::dslFunctional) {
471 fill(snap_->atomData.functional.begin(),
472 snap_->atomData.functional.end(), 0.0);
473 }
474 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
475 fill(snap_->atomData.functionalDerivative.begin(),
476 snap_->atomData.functionalDerivative.end(), 0.0);
477 }
478 #endif
479
480 }
481
482
483 void ForceMatrixDecomposition::distributeData() {
484 snap_ = sman_->getCurrentSnapshot();
485 storageLayout_ = sman_->getStorageLayout();
486 #ifdef IS_MPI
487
488 // gather up the atomic positions
489 AtomCommVectorRow->gather(snap_->atomData.position,
490 atomRowData.position);
491 AtomCommVectorColumn->gather(snap_->atomData.position,
492 atomColData.position);
493
494 // gather up the cutoff group positions
495 cgCommVectorRow->gather(snap_->cgData.position,
496 cgRowData.position);
497 cgCommVectorColumn->gather(snap_->cgData.position,
498 cgColData.position);
499
500 // if needed, gather the atomic rotation matrices
501 if (storageLayout_ & DataStorage::dslAmat) {
502 AtomCommMatrixRow->gather(snap_->atomData.aMat,
503 atomRowData.aMat);
504 AtomCommMatrixColumn->gather(snap_->atomData.aMat,
505 atomColData.aMat);
506 }
507
508 // if needed, gather the atomic electrostatic frames
509 if (storageLayout_ & DataStorage::dslElectroFrame) {
510 AtomCommMatrixRow->gather(snap_->atomData.electroFrame,
511 atomRowData.electroFrame);
512 AtomCommMatrixColumn->gather(snap_->atomData.electroFrame,
513 atomColData.electroFrame);
514 }
515 #endif
516 }
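// distributeData() is the outbound half of each force evaluation in the
// parallel case: the locally stored positions (plus rotation matrices and
// electrostatic frames when present) are gathered into the row- and
// column-indexed copies that the pair loop reads.  collectData() below is
// the matching inbound half, scattering the accumulated results back onto
// the processors that own the atoms.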
517
518 /* collects information obtained during the pre-pair loop onto local
519 * data structures.
520 */
521 void ForceMatrixDecomposition::collectIntermediateData() {
522 snap_ = sman_->getCurrentSnapshot();
523 storageLayout_ = sman_->getStorageLayout();
524 #ifdef IS_MPI
525
526 if (storageLayout_ & DataStorage::dslDensity) {
527
528 AtomCommRealRow->scatter(atomRowData.density,
529 snap_->atomData.density);
530
531 int n = snap_->atomData.density.size();
532 vector<RealType> rho_tmp(n, 0.0);
533 AtomCommRealColumn->scatter(atomColData.density, rho_tmp);
534 for (int i = 0; i < n; i++)
535 snap_->atomData.density[i] += rho_tmp[i];
536 }
537 #endif
538 }
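// The density / functional / functionalDerivative round trip handled by
// collectIntermediateData() and distributeIntermediateData() exists for
// density-dependent (embedded-atom style) interactions, which need the
// completed density sum on every atom before the second pass of the pair
// loop can evaluate forces.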
539
540 /*
541 * redistributes information obtained during the pre-pair loop out to
542 * row and column-indexed data structures
543 */
544 void ForceMatrixDecomposition::distributeIntermediateData() {
545 snap_ = sman_->getCurrentSnapshot();
546 storageLayout_ = sman_->getStorageLayout();
547 #ifdef IS_MPI
548 if (storageLayout_ & DataStorage::dslFunctional) {
549 AtomCommRealRow->gather(snap_->atomData.functional,
550 atomRowData.functional);
551 AtomCommRealColumn->gather(snap_->atomData.functional,
552 atomColData.functional);
553 }
554
555 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
556 AtomCommRealRow->gather(snap_->atomData.functionalDerivative,
557 atomRowData.functionalDerivative);
558 AtomCommRealColumn->gather(snap_->atomData.functionalDerivative,
559 atomColData.functionalDerivative);
560 }
561 #endif
562 }
563
564
565 void ForceMatrixDecomposition::collectData() {
566 snap_ = sman_->getCurrentSnapshot();
567 storageLayout_ = sman_->getStorageLayout();
568 #ifdef IS_MPI
569 int n = snap_->atomData.force.size();
570 vector<Vector3d> frc_tmp(n, V3Zero);
571
572 AtomCommVectorRow->scatter(atomRowData.force, frc_tmp);
573 for (int i = 0; i < n; i++) {
574 snap_->atomData.force[i] += frc_tmp[i];
575 frc_tmp[i] = 0.0;
576 }
577
578 AtomCommVectorColumn->scatter(atomColData.force, frc_tmp);
579 for (int i = 0; i < n; i++)
580 snap_->atomData.force[i] += frc_tmp[i];
581
582
583 if (storageLayout_ & DataStorage::dslTorque) {
584
585 int nt = snap_->atomData.torque.size();
586 vector<Vector3d> trq_tmp(nt, V3Zero);
587
588 AtomCommVectorRow->scatter(atomRowData.torque, trq_tmp);
589 for (int i = 0; i < n; i++) {
590 snap_->atomData.torque[i] += trq_tmp[i];
591 trq_tmp[i] = 0.0;
592 }
593
594 AtomCommVectorColumn->scatter(atomColData.torque, trq_tmp);
595 for (int i = 0; i < n; i++)
596 snap_->atomData.torque[i] += trq_tmp[i];
597 }
598
599 nLocal_ = snap_->getNumberOfAtoms();
600
601 vector<potVec> pot_temp(nLocal_,
602 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
603
604 // scatter/gather pot_row into the members of my column
605
606 AtomCommPotRow->scatter(pot_row, pot_temp);
607
608 for (int ii = 0; ii < pot_temp.size(); ii++ )
609 pairwisePot += pot_temp[ii];
610
611 fill(pot_temp.begin(), pot_temp.end(),
612 Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
613
614 AtomCommPotColumn->scatter(pot_col, pot_temp);
615
616 for (int ii = 0; ii < pot_temp.size(); ii++ )
617 pairwisePot += pot_temp[ii];
618 #endif
619
620 }
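// Note the accumulation pattern above: the row scatter and the column
// scatter each deliver only that communicator's share of the result, so
// the locally stored value is rebuilt as, schematically,
//
//   force[i] += frc_from_row[i];  force[i] += frc_from_column[i];
//
// with the same pattern repeated for torques and for the pairwise
// potential (pot_row and pot_col each carrying half of every pair energy).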
621
622 int ForceMatrixDecomposition::getNAtomsInRow() {
623 #ifdef IS_MPI
624 return nAtomsInRow_;
625 #else
626 return nLocal_;
627 #endif
628 }
629
630 /**
631 * returns the list of atoms belonging to this group.
632 */
633 vector<int> ForceMatrixDecomposition::getAtomsInGroupRow(int cg1){
634 #ifdef IS_MPI
635 return groupListRow_[cg1];
636 #else
637 return groupList_[cg1];
638 #endif
639 }
640
641 vector<int> ForceMatrixDecomposition::getAtomsInGroupColumn(int cg2){
642 #ifdef IS_MPI
643 return groupListCol_[cg2];
644 #else
645 return groupList_[cg2];
646 #endif
647 }
648
649 Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1, int cg2){
650 Vector3d d;
651
652 #ifdef IS_MPI
653 d = cgColData.position[cg2] - cgRowData.position[cg1];
654 #else
655 d = snap_->cgData.position[cg2] - snap_->cgData.position[cg1];
656 #endif
657
658 snap_->wrapVector(d);
659 return d;
660 }
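// wrapVector() applies the minimum-image convention, so the separation
// vectors returned by the get*Vector() methods in this block always point
// to the nearest periodic image.  For example, in a cubic box of edge 20,
// a raw x-separation of +18 comes back as -2.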
661
662
663 Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){
664
665 Vector3d d;
666
667 #ifdef IS_MPI
668 d = cgRowData.position[cg1] - atomRowData.position[atom1];
669 #else
670 d = snap_->cgData.position[cg1] - snap_->atomData.position[atom1];
671 #endif
672
673 snap_->wrapVector(d);
674 return d;
675 }
676
677 Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2, int cg2){
678 Vector3d d;
679
680 #ifdef IS_MPI
681 d = cgColData.position[cg2] - atomColData.position[atom2];
682 #else
683 d = snap_->cgData.position[cg2] - snap_->atomData.position[atom2];
684 #endif
685
686 snap_->wrapVector(d);
687 return d;
688 }
689
690 RealType ForceMatrixDecomposition::getMassFactorRow(int atom1) {
691 #ifdef IS_MPI
692 return massFactorsRow[atom1];
693 #else
694 cerr << "mfs = " << massFactors.size() << " atom1 = " << atom1 << "\n";
695 return massFactors[atom1];
696 #endif
697 }
698
699 RealType ForceMatrixDecomposition::getMassFactorColumn(int atom2) {
700 #ifdef IS_MPI
701 return massFactorsCol[atom2];
702 #else
703 return massFactors[atom2];
704 #endif
705
706 }
707
708 Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1, int atom2){
709 Vector3d d;
710
711 #ifdef IS_MPI
712 d = atomColData.position[atom2] - atomRowData.position[atom1];
713 #else
714 d = snap_->atomData.position[atom2] - snap_->atomData.position[atom1];
715 #endif
716
717 snap_->wrapVector(d);
718 return d;
719 }
720
721 vector<int> ForceMatrixDecomposition::getSkipsForAtom(int atom1) {
722 return skipsForAtom[atom1];
723 }
724
725 /**
726 * There are a number of reasons to skip a pair or a
727 * particle. Mostly we do this to exclude atoms that are involved in
728 * short range interactions (bonds, bends, torsions), but we also
729 * need to exclude some overcounted interactions that result from
730 * the parallel decomposition.
731 */
732 bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) {
733 int unique_id_1, unique_id_2;
734
735 #ifdef IS_MPI
736 // in MPI, we have to look up the unique IDs for each atom
737 unique_id_1 = AtomRowToGlobal[atom1];
738 unique_id_2 = AtomColToGlobal[atom2];
739
740 // this situation should only arise in MPI simulations
741 if (unique_id_1 == unique_id_2) return true;
742
743 // this prevents us from doing the pair on multiple processors
744 if (unique_id_1 < unique_id_2) {
745 if ((unique_id_1 + unique_id_2) % 2 == 0) return true;
746 } else {
747 if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
748 }
749 #else
750 // in the normal loop, the atom numbers are unique
751 unique_id_1 = atom1;
752 unique_id_2 = atom2;
753 #endif
754
755 for (vector<int>::iterator i = skipsForAtom[atom1].begin();
756 i != skipsForAtom[atom1].end(); ++i) {
757 if ( (*i) == unique_id_2 ) return true;
758 }
759
760 return false;
761 }
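// The even/odd test above is what keeps each global pair on exactly one
// block of the force matrix.  Worked example (sketch): for the global pair
// (3, 8) the sum 11 is odd, so the block holding atom 3 in its rows and
// atom 8 in its columns computes the interaction (id1 < id2, sum not
// even), while the transposed block, which sees the pair as (8, 3), skips
// it (id1 > id2, sum odd).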
762
763
764 void ForceMatrixDecomposition::addForceToAtomRow(int atom1, Vector3d fg){
765 #ifdef IS_MPI
766 atomRowData.force[atom1] += fg;
767 #else
768 snap_->atomData.force[atom1] += fg;
769 #endif
770 }
771
772 void ForceMatrixDecomposition::addForceToAtomColumn(int atom2, Vector3d fg){
773 #ifdef IS_MPI
774 atomColData.force[atom2] += fg;
775 #else
776 snap_->atomData.force[atom2] += fg;
777 #endif
778 }
779
780 // filling interaction blocks with pointers
781 void ForceMatrixDecomposition::fillInteractionData(InteractionData &idat,
782 int atom1, int atom2) {
783 #ifdef IS_MPI
784
785 idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]),
786 ff_->getAtomType(identsCol[atom2]) );
787
788 if (storageLayout_ & DataStorage::dslAmat) {
789 idat.A1 = &(atomRowData.aMat[atom1]);
790 idat.A2 = &(atomColData.aMat[atom2]);
791 }
792
793 if (storageLayout_ & DataStorage::dslElectroFrame) {
794 idat.eFrame1 = &(atomRowData.electroFrame[atom1]);
795 idat.eFrame2 = &(atomColData.electroFrame[atom2]);
796 }
797
798 if (storageLayout_ & DataStorage::dslTorque) {
799 idat.t1 = &(atomRowData.torque[atom1]);
800 idat.t2 = &(atomColData.torque[atom2]);
801 }
802
803 if (storageLayout_ & DataStorage::dslDensity) {
804 idat.rho1 = &(atomRowData.density[atom1]);
805 idat.rho2 = &(atomColData.density[atom2]);
806 }
807
808 if (storageLayout_ & DataStorage::dslFunctional) {
809 idat.frho1 = &(atomRowData.functional[atom1]);
810 idat.frho2 = &(atomColData.functional[atom2]);
811 }
812
813 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
814 idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
815 idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
816 }
817
818 if (storageLayout_ & DataStorage::dslParticlePot) {
819 idat.particlePot1 = &(atomRowData.particlePot[atom1]);
820 idat.particlePot2 = &(atomColData.particlePot[atom2]);
821 }
822
823 #else
824
825 idat.atypes = make_pair( ff_->getAtomType(idents[atom1]),
826 ff_->getAtomType(idents[atom2]) );
827
828 if (storageLayout_ & DataStorage::dslAmat) {
829 idat.A1 = &(snap_->atomData.aMat[atom1]);
830 idat.A2 = &(snap_->atomData.aMat[atom2]);
831 }
832
833 if (storageLayout_ & DataStorage::dslElectroFrame) {
834 idat.eFrame1 = &(snap_->atomData.electroFrame[atom1]);
835 idat.eFrame2 = &(snap_->atomData.electroFrame[atom2]);
836 }
837
838 if (storageLayout_ & DataStorage::dslTorque) {
839 idat.t1 = &(snap_->atomData.torque[atom1]);
840 idat.t2 = &(snap_->atomData.torque[atom2]);
841 }
842
843 if (storageLayout_ & DataStorage::dslDensity) {
844 idat.rho1 = &(snap_->atomData.density[atom1]);
845 idat.rho2 = &(snap_->atomData.density[atom2]);
846 }
847
848 if (storageLayout_ & DataStorage::dslFunctional) {
849 idat.frho1 = &(snap_->atomData.functional[atom1]);
850 idat.frho2 = &(snap_->atomData.functional[atom2]);
851 }
852
853 if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
854 idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
855 idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
856 }
857
858 if (storageLayout_ & DataStorage::dslParticlePot) {
859 idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
860 idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
861 }
862
863 #endif
864 }
865
866
867 void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) {
868 #ifdef IS_MPI
869 pot_row[atom1] += 0.5 * *(idat.pot);
870 pot_col[atom2] += 0.5 * *(idat.pot);
871
872 atomRowData.force[atom1] += *(idat.f1);
873 atomColData.force[atom2] -= *(idat.f1);
874 #else
875 pairwisePot += *(idat.pot);
876
877 snap_->atomData.force[atom1] += *(idat.f1);
878 snap_->atomData.force[atom2] -= *(idat.f1);
879 #endif
880
881 }
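// Two conventions to note here: Newton's third law is applied by adding
// idat.f1 to one atom and subtracting it from the other, and in the
// parallel case the pair energy is split evenly between pot_row and
// pot_col so that the reductions in collectData() count each pairwise
// contribution exactly once.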
882
883
884 void ForceMatrixDecomposition::fillSkipData(InteractionData &idat,
885 int atom1, int atom2) {
886 #ifdef IS_MPI
887 idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]),
888 ff_->getAtomType(identsCol[atom2]) );
889
890 if (storageLayout_ & DataStorage::dslElectroFrame) {
891 idat.eFrame1 = &(atomRowData.electroFrame[atom1]);
892 idat.eFrame2 = &(atomColData.electroFrame[atom2]);
893 }
894
895 if (storageLayout_ & DataStorage::dslTorque) {
896 idat.t1 = &(atomRowData.torque[atom1]);
897 idat.t2 = &(atomColData.torque[atom2]);
898 }
899
900 if (storageLayout_ & DataStorage::dslSkippedCharge) {
901 idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
902 idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
903 }
904 #else
905 idat.atypes = make_pair( ff_->getAtomType(idents[atom1]),
906 ff_->getAtomType(idents[atom2]) );
907
908 if (storageLayout_ & DataStorage::dslElectroFrame) {
909 idat.eFrame1 = &(snap_->atomData.electroFrame[atom1]);
910 idat.eFrame2 = &(snap_->atomData.electroFrame[atom2]);
911 }
912
913 if (storageLayout_ & DataStorage::dslTorque) {
914 idat.t1 = &(snap_->atomData.torque[atom1]);
915 idat.t2 = &(snap_->atomData.torque[atom2]);
916 }
917
918 if (storageLayout_ & DataStorage::dslSkippedCharge) {
919 idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
920 idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
921 }
922 #endif
923 }
924
925
926 void ForceMatrixDecomposition::unpackSkipData(InteractionData &idat, int atom1, int atom2) {
927 #ifdef IS_MPI
928 pot_row[atom1] += 0.5 * *(idat.pot);
929 pot_col[atom2] += 0.5 * *(idat.pot);
930 #else
931 pairwisePot += *(idat.pot);
932 #endif
933
934 }
935
936
937 /*
938 * buildNeighborList
939 *
940 * first element of pair is row-indexed CutoffGroup
941 * second element of pair is column-indexed CutoffGroup
942 */
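/*
 * The construction is a standard linked-cell search (sketch):
 *   1. choose a cell edge of at least rList_ = largestRcut_ + skinThickness_
 *      along each box vector, giving nCells_ cells in the box;
 *   2. bin every cutoff group into a cell using its box-scaled coordinates
 *      (scaled = invHmat * r, wrapped back into [0,1));
 *   3. for each cell, test only the groups in that cell and in the
 *      neighboring cells listed in cellOffsets_, keeping a pair whenever
 *      the wrapped separation satisfies dr.lengthSquare() < cuts.third,
 *      i.e. |dr| < group-group rcut + skinThickness_.
 */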
943 vector<pair<int, int> > ForceMatrixDecomposition::buildNeighborList() {
944
945 vector<pair<int, int> > neighborList;
946 groupCutoffs cuts;
947 #ifdef IS_MPI
948 cellListRow_.clear();
949 cellListCol_.clear();
950 #else
951 cellList_.clear();
952 #endif
953
954 RealType rList_ = (largestRcut_ + skinThickness_);
955 RealType rl2 = rList_ * rList_;
956 Snapshot* snap_ = sman_->getCurrentSnapshot();
957 Mat3x3d Hmat = snap_->getHmat();
958 Vector3d Hx = Hmat.getColumn(0);
959 Vector3d Hy = Hmat.getColumn(1);
960 Vector3d Hz = Hmat.getColumn(2);
961
962 nCells_.x() = (int) ( Hx.length() / rList_ );
963 nCells_.y() = (int) ( Hy.length() / rList_ );
964 nCells_.z() = (int) ( Hz.length() / rList_ );
965
966 Mat3x3d invHmat = snap_->getInvHmat();
967 Vector3d rs, scaled, dr;
968 Vector3i whichCell;
969 int cellIndex;
970 int nCtot = nCells_.x() * nCells_.y() * nCells_.z();
971
972 #ifdef IS_MPI
973 cellListRow_.resize(nCtot);
974 cellListCol_.resize(nCtot);
975 #else
976 cellList_.resize(nCtot);
977 #endif
978
979 #ifdef IS_MPI
980 for (int i = 0; i < nGroupsInRow_; i++) {
981 rs = cgRowData.position[i];
982
983 // scaled positions relative to the box vectors
984 scaled = invHmat * rs;
985
986 // wrap the vector back into the unit box by subtracting integer box
987 // numbers
988 for (int j = 0; j < 3; j++) {
989 scaled[j] -= roundMe(scaled[j]);
990 scaled[j] += 0.5;
991 }
992
993 // find xyz-indices of cell that cutoffGroup is in.
994 whichCell.x() = nCells_.x() * scaled.x();
995 whichCell.y() = nCells_.y() * scaled.y();
996 whichCell.z() = nCells_.z() * scaled.z();
997
998 // find single index of this cell:
999 cellIndex = Vlinear(whichCell, nCells_);
1000
1001 // add this cutoff group to the list of groups in this cell;
1002 cellListRow_[cellIndex].push_back(i);
1003 }
1004
1005 for (int i = 0; i < nGroupsInCol_; i++) {
1006 rs = cgColData.position[i];
1007
1008 // scaled positions relative to the box vectors
1009 scaled = invHmat * rs;
1010
1011 // wrap the vector back into the unit box by subtracting integer box
1012 // numbers
1013 for (int j = 0; j < 3; j++) {
1014 scaled[j] -= roundMe(scaled[j]);
1015 scaled[j] += 0.5;
1016 }
1017
1018 // find xyz-indices of cell that cutoffGroup is in.
1019 whichCell.x() = nCells_.x() * scaled.x();
1020 whichCell.y() = nCells_.y() * scaled.y();
1021 whichCell.z() = nCells_.z() * scaled.z();
1022
1023 // find single index of this cell:
1024 cellIndex = Vlinear(whichCell, nCells_);
1025
1026 // add this cutoff group to the list of groups in this cell;
1027 cellListCol_[cellIndex].push_back(i);
1028 }
1029 #else
1030 for (int i = 0; i < nGroups_; i++) {
1031 rs = snap_->cgData.position[i];
1032
1033 // scaled positions relative to the box vectors
1034 scaled = invHmat * rs;
1035
1036 // wrap the vector back into the unit box by subtracting integer box
1037 // numbers
1038 for (int j = 0; j < 3; j++) {
1039 scaled[j] -= roundMe(scaled[j]);
1040 scaled[j] += 0.5;
1041 }
1042
1043 // find xyz-indices of cell that cutoffGroup is in.
1044 whichCell.x() = nCells_.x() * scaled.x();
1045 whichCell.y() = nCells_.y() * scaled.y();
1046 whichCell.z() = nCells_.z() * scaled.z();
1047
1048 // find single index of this cell:
1049 cellIndex = Vlinear(whichCell, nCells_);
1050
1051 // add this cutoff group to the list of groups in this cell;
1052 cellList_[cellIndex].push_back(i);
1053 }
1054 #endif
1055
1056 for (int m1z = 0; m1z < nCells_.z(); m1z++) {
1057 for (int m1y = 0; m1y < nCells_.y(); m1y++) {
1058 for (int m1x = 0; m1x < nCells_.x(); m1x++) {
1059 Vector3i m1v(m1x, m1y, m1z);
1060 int m1 = Vlinear(m1v, nCells_);
1061
1062 for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1063 os != cellOffsets_.end(); ++os) {
1064
1065 Vector3i m2v = m1v + (*os);
1066
1067 if (m2v.x() >= nCells_.x()) {
1068 m2v.x() = 0;
1069 } else if (m2v.x() < 0) {
1070 m2v.x() = nCells_.x() - 1;
1071 }
1072
1073 if (m2v.y() >= nCells_.y()) {
1074 m2v.y() = 0;
1075 } else if (m2v.y() < 0) {
1076 m2v.y() = nCells_.y() - 1;
1077 }
1078
1079 if (m2v.z() >= nCells_.z()) {
1080 m2v.z() = 0;
1081 } else if (m2v.z() < 0) {
1082 m2v.z() = nCells_.z() - 1;
1083 }
1084
1085 int m2 = Vlinear (m2v, nCells_);
1086
1087 #ifdef IS_MPI
1088 for (vector<int>::iterator j1 = cellListRow_[m1].begin();
1089 j1 != cellListRow_[m1].end(); ++j1) {
1090 for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1091 j2 != cellListCol_[m2].end(); ++j2) {
1092
1093 // Always do this if we're in different cells or if
1094 // we're in the same cell and the global index of the
1095 // j2 cutoff group is less than that of the j1 cutoff group
1096
1097 if (m2 != m1 || cgColToGlobal[(*j2)] < cgRowToGlobal[(*j1)]) {
1098 dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)];
1099 snap_->wrapVector(dr);
1100 cuts = getGroupCutoffs( (*j1), (*j2) );
1101 if (dr.lengthSquare() < cuts.third) {
1102 neighborList.push_back(make_pair((*j1), (*j2)));
1103 }
1104 }
1105 }
1106 }
1107 #else
1108
1109 for (vector<int>::iterator j1 = cellList_[m1].begin();
1110 j1 != cellList_[m1].end(); ++j1) {
1111 for (vector<int>::iterator j2 = cellList_[m2].begin();
1112 j2 != cellList_[m2].end(); ++j2) {
1113
1114 // Always do this if we're in different cells or if
1115 // we're in the same cell and the index of the
1116 // j2 cutoff group is less than that of the j1 cutoff group
1117
1118 if (m2 != m1 || (*j2) < (*j1)) {
1119 dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
1120 snap_->wrapVector(dr);
1121 cuts = getGroupCutoffs( (*j1), (*j2) );
1122 if (dr.lengthSquare() < cuts.third) {
1123 neighborList.push_back(make_pair((*j1), (*j2)));
1124 }
1125 }
1126 }
1127 }
1128 #endif
1129 }
1130 }
1131 }
1132 }
1133
1134 // save the local cutoff group positions for the check that is
1135 // done on each loop:
1136 saved_CG_positions_.clear();
1137 for (int i = 0; i < nGroups_; i++)
1138 saved_CG_positions_.push_back(snap_->cgData.position[i]);
1139
1140 return neighborList;
1141 }
1142 } //end namespace OpenMD