root/OpenMD/trunk/src/parallel/ForceMatrixDecomposition.cpp

Comparing branches/development/src/parallel/ForceMatrixDecomposition.cpp (file contents):
Revision 1569 by gezelter, Thu May 26 13:55:04 2011 UTC vs.
Revision 1616 by gezelter, Tue Aug 30 15:45:35 2011 UTC

# Line 42 | Line 42
42   #include "math/SquareMatrix3.hpp"
43   #include "nonbonded/NonBondedInteraction.hpp"
44   #include "brains/SnapshotManager.hpp"
45 + #include "brains/PairList.hpp"
46  
47   using namespace std;
48   namespace OpenMD {
49  
50 +  ForceMatrixDecomposition::ForceMatrixDecomposition(SimInfo* info, InteractionManager* iMan) : ForceDecomposition(info, iMan) {
51 +
52 +    // In a parallel computation, row and column scans must visit all
53 +    // surrounding cells (not just the 14 upper triangular blocks that
54 +    // are used when the processor can see all pairs)
55 + #ifdef IS_MPI
56 +    cellOffsets_.clear();
57 +    cellOffsets_.push_back( Vector3i(-1,-1,-1) );
58 +    cellOffsets_.push_back( Vector3i( 0,-1,-1) );
59 +    cellOffsets_.push_back( Vector3i( 1,-1,-1) );                          
60 +    cellOffsets_.push_back( Vector3i(-1, 0,-1) );
61 +    cellOffsets_.push_back( Vector3i( 0, 0,-1) );
62 +    cellOffsets_.push_back( Vector3i( 1, 0,-1) );
63 +    cellOffsets_.push_back( Vector3i(-1, 1,-1) );
64 +    cellOffsets_.push_back( Vector3i( 0, 1,-1) );      
65 +    cellOffsets_.push_back( Vector3i( 1, 1,-1) );
66 +    cellOffsets_.push_back( Vector3i(-1,-1, 0) );
67 +    cellOffsets_.push_back( Vector3i( 0,-1, 0) );
68 +    cellOffsets_.push_back( Vector3i( 1,-1, 0) );
69 +    cellOffsets_.push_back( Vector3i(-1, 0, 0) );      
70 +    cellOffsets_.push_back( Vector3i( 0, 0, 0) );
71 +    cellOffsets_.push_back( Vector3i( 1, 0, 0) );
72 +    cellOffsets_.push_back( Vector3i(-1, 1, 0) );
73 +    cellOffsets_.push_back( Vector3i( 0, 1, 0) );
74 +    cellOffsets_.push_back( Vector3i( 1, 1, 0) );
75 +    cellOffsets_.push_back( Vector3i(-1,-1, 1) );
76 +    cellOffsets_.push_back( Vector3i( 0,-1, 1) );
77 +    cellOffsets_.push_back( Vector3i( 1,-1, 1) );
78 +    cellOffsets_.push_back( Vector3i(-1, 0, 1) );
79 +    cellOffsets_.push_back( Vector3i( 0, 0, 1) );
80 +    cellOffsets_.push_back( Vector3i( 1, 0, 1) );
81 +    cellOffsets_.push_back( Vector3i(-1, 1, 1) );
82 +    cellOffsets_.push_back( Vector3i( 0, 1, 1) );
83 +    cellOffsets_.push_back( Vector3i( 1, 1, 1) );
84 + #endif    
85 +  }
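
For reference, the 27 offsets listed above are every combination in {-1, 0, 1}^3. A minimal sketch (not part of this revision, using only the Vector3i and cellOffsets_ names already present) that builds the same stencil in the same order:

    // Full 27-cell stencil: in parallel the row and column cell lists live on
    // different processors, so the usual 14-cell half-shell cannot be used.
    cellOffsets_.clear();
    for (int k = -1; k <= 1; k++)
      for (int j = -1; j <= 1; j++)
        for (int i = -1; i <= 1; i++)
          cellOffsets_.push_back( Vector3i(i, j, k) );
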
86 +
87 +
88    /**
89     * distributeInitialData is essentially a copy of the older fortran
90     * SimulationSetup
91     */
53  
92    void ForceMatrixDecomposition::distributeInitialData() {
93      snap_ = sman_->getCurrentSnapshot();
94      storageLayout_ = sman_->getStorageLayout();
95 +    ff_ = info_->getForceField();
96      nLocal_ = snap_->getNumberOfAtoms();
97 <    nGroups_ = snap_->getNumberOfCutoffGroups();
98 <
97 >    
98 >    nGroups_ = info_->getNLocalCutoffGroups();
99      // gather the information for atomtype IDs (atids):
100 <    vector<int> identsLocal = info_->getIdentArray();
100 >    idents = info_->getIdentArray();
101      AtomLocalToGlobal = info_->getGlobalAtomIndices();
102      cgLocalToGlobal = info_->getGlobalGroupIndices();
103      vector<int> globalGroupMembership = info_->getGlobalGroupMembership();
65    vector<RealType> massFactorsLocal = info_->getMassFactors();
66    vector<RealType> pot_local(N_INTERACTION_FAMILIES, 0.0);
104  
105 +    massFactors = info_->getMassFactors();
106 +
107 +    PairList* excludes = info_->getExcludedInteractions();
108 +    PairList* oneTwo = info_->getOneTwoInteractions();
109 +    PairList* oneThree = info_->getOneThreeInteractions();
110 +    PairList* oneFour = info_->getOneFourInteractions();
111 +
112   #ifdef IS_MPI
113  
114 <    AtomCommIntRow = new Communicator<Row,int>(nLocal_);
115 <    AtomCommRealRow = new Communicator<Row,RealType>(nLocal_);
72 <    AtomCommVectorRow = new Communicator<Row,Vector3d>(nLocal_);
73 <    AtomCommMatrixRow = new Communicator<Row,Mat3x3d>(nLocal_);
114 >    MPI::Intracomm row = rowComm.getComm();
115 >    MPI::Intracomm col = colComm.getComm();
116  
117 <    AtomCommIntColumn = new Communicator<Column,int>(nLocal_);
118 <    AtomCommRealColumn = new Communicator<Column,RealType>(nLocal_);
119 <    AtomCommVectorColumn = new Communicator<Column,Vector3d>(nLocal_);
120 <    AtomCommMatrixColumn = new Communicator<Column,Mat3x3d>(nLocal_);
117 >    AtomPlanIntRow = new Plan<int>(row, nLocal_);
118 >    AtomPlanRealRow = new Plan<RealType>(row, nLocal_);
119 >    AtomPlanVectorRow = new Plan<Vector3d>(row, nLocal_);
120 >    AtomPlanMatrixRow = new Plan<Mat3x3d>(row, nLocal_);
121 >    AtomPlanPotRow = new Plan<potVec>(row, nLocal_);
122  
123 <    cgCommIntRow = new Communicator<Row,int>(nGroups_);
124 <    cgCommVectorRow = new Communicator<Row,Vector3d>(nGroups_);
125 <    cgCommIntColumn = new Communicator<Column,int>(nGroups_);
126 <    cgCommVectorColumn = new Communicator<Column,Vector3d>(nGroups_);
123 >    AtomPlanIntColumn = new Plan<int>(col, nLocal_);
124 >    AtomPlanRealColumn = new Plan<RealType>(col, nLocal_);
125 >    AtomPlanVectorColumn = new Plan<Vector3d>(col, nLocal_);
126 >    AtomPlanMatrixColumn = new Plan<Mat3x3d>(col, nLocal_);
127 >    AtomPlanPotColumn = new Plan<potVec>(col, nLocal_);
128  
129 <    nAtomsInRow_ = AtomCommIntRow->getSize();
130 <    nAtomsInCol_ = AtomCommIntColumn->getSize();
131 <    nGroupsInRow_ = cgCommIntRow->getSize();
132 <    nGroupsInCol_ = cgCommIntColumn->getSize();
129 >    cgPlanIntRow = new Plan<int>(row, nGroups_);
130 >    cgPlanVectorRow = new Plan<Vector3d>(row, nGroups_);
131 >    cgPlanIntColumn = new Plan<int>(col, nGroups_);
132 >    cgPlanVectorColumn = new Plan<Vector3d>(col, nGroups_);
133  
134 +    nAtomsInRow_ = AtomPlanIntRow->getSize();
135 +    nAtomsInCol_ = AtomPlanIntColumn->getSize();
136 +    nGroupsInRow_ = cgPlanIntRow->getSize();
137 +    nGroupsInCol_ = cgPlanIntColumn->getSize();
138 +
139      // Modify the data storage objects with the correct layouts and sizes:
140      atomRowData.resize(nAtomsInRow_);
141      atomRowData.setStorageLayout(storageLayout_);
# Line 96 | Line 145 | namespace OpenMD {
145      cgRowData.setStorageLayout(DataStorage::dslPosition);
146      cgColData.resize(nGroupsInCol_);
147      cgColData.setStorageLayout(DataStorage::dslPosition);
148 +        
149 +    identsRow.resize(nAtomsInRow_);
150 +    identsCol.resize(nAtomsInCol_);
151      
152 <    vector<vector<RealType> > pot_row(N_INTERACTION_FAMILIES,
153 <                                      vector<RealType> (nAtomsInRow_, 0.0));
102 <    vector<vector<RealType> > pot_col(N_INTERACTION_FAMILIES,
103 <                                      vector<RealType> (nAtomsInCol_, 0.0));
152 >    AtomPlanIntRow->gather(idents, identsRow);
153 >    AtomPlanIntColumn->gather(idents, identsCol);
154      
155 <    identsRow.reserve(nAtomsInRow_);
156 <    identsCol.reserve(nAtomsInCol_);
157 <    
108 <    AtomCommIntRow->gather(identsLocal, identsRow);
109 <    AtomCommIntColumn->gather(identsLocal, identsCol);
110 <    
111 <    AtomCommIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
112 <    AtomCommIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);
113 <    
114 <    cgCommIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
115 <    cgCommIntColumn->gather(cgLocalToGlobal, cgColToGlobal);
155 >    // allocate memory for the parallel objects
156 >    atypesRow.resize(nAtomsInRow_);
157 >    atypesCol.resize(nAtomsInCol_);
158  
159 <    AtomCommRealRow->gather(massFactorsLocal, massFactorsRow);
160 <    AtomCommRealColumn->gather(massFactorsLocal, massFactorsCol);
159 >    for (int i = 0; i < nAtomsInRow_; i++)
160 >      atypesRow[i] = ff_->getAtomType(identsRow[i]);
161 >    for (int i = 0; i < nAtomsInCol_; i++)
162 >      atypesCol[i] = ff_->getAtomType(identsCol[i]);        
163  
164 +    pot_row.resize(nAtomsInRow_);
165 +    pot_col.resize(nAtomsInCol_);
166 +
167 +    AtomRowToGlobal.resize(nAtomsInRow_);
168 +    AtomColToGlobal.resize(nAtomsInCol_);
169 +    AtomPlanIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
170 +    AtomPlanIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);
171 +
172 +    cgRowToGlobal.resize(nGroupsInRow_);
173 +    cgColToGlobal.resize(nGroupsInCol_);
174 +    cgPlanIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
175 +    cgPlanIntColumn->gather(cgLocalToGlobal, cgColToGlobal);
176 +
177 +    massFactorsRow.resize(nAtomsInRow_);
178 +    massFactorsCol.resize(nAtomsInCol_);
179 +    AtomPlanRealRow->gather(massFactors, massFactorsRow);
180 +    AtomPlanRealColumn->gather(massFactors, massFactorsCol);
181 +
182      groupListRow_.clear();
183 <    groupListRow_.reserve(nGroupsInRow_);
183 >    groupListRow_.resize(nGroupsInRow_);
184      for (int i = 0; i < nGroupsInRow_; i++) {
185        int gid = cgRowToGlobal[i];
186        for (int j = 0; j < nAtomsInRow_; j++) {
# Line 129 | Line 191 | namespace OpenMD {
191      }
192  
193      groupListCol_.clear();
194 <    groupListCol_.reserve(nGroupsInCol_);
194 >    groupListCol_.resize(nGroupsInCol_);
195      for (int i = 0; i < nGroupsInCol_; i++) {
196        int gid = cgColToGlobal[i];
197        for (int j = 0; j < nAtomsInCol_; j++) {
# Line 139 | Line 201 | namespace OpenMD {
201        }      
202      }
203  
204 +    excludesForAtom.clear();
205 +    excludesForAtom.resize(nAtomsInRow_);
206 +    toposForAtom.clear();
207 +    toposForAtom.resize(nAtomsInRow_);
208 +    topoDist.clear();
209 +    topoDist.resize(nAtomsInRow_);
210 +    for (int i = 0; i < nAtomsInRow_; i++) {
211 +      int iglob = AtomRowToGlobal[i];
212 +
213 +      for (int j = 0; j < nAtomsInCol_; j++) {
214 +        int jglob = AtomColToGlobal[j];
215 +
216 +        if (excludes->hasPair(iglob, jglob))
217 +          excludesForAtom[i].push_back(j);      
218 +        
219 +        if (oneTwo->hasPair(iglob, jglob)) {
220 +          toposForAtom[i].push_back(j);
221 +          topoDist[i].push_back(1);
222 +        } else {
223 +          if (oneThree->hasPair(iglob, jglob)) {
224 +            toposForAtom[i].push_back(j);
225 +            topoDist[i].push_back(2);
226 +          } else {
227 +            if (oneFour->hasPair(iglob, jglob)) {
228 +              toposForAtom[i].push_back(j);
229 +              topoDist[i].push_back(3);
230 +            }
231 +          }
232 +        }
233 +      }      
234 +    }
235 +
236 + #else
237 +    excludesForAtom.clear();
238 +    excludesForAtom.resize(nLocal_);
239 +    toposForAtom.clear();
240 +    toposForAtom.resize(nLocal_);
241 +    topoDist.clear();
242 +    topoDist.resize(nLocal_);
243 +
244 +    for (int i = 0; i < nLocal_; i++) {
245 +      int iglob = AtomLocalToGlobal[i];
246 +
247 +      for (int j = 0; j < nLocal_; j++) {
248 +        int jglob = AtomLocalToGlobal[j];
249 +
250 +        if (excludes->hasPair(iglob, jglob))
251 +          excludesForAtom[i].push_back(j);              
252 +        
253 +        if (oneTwo->hasPair(iglob, jglob)) {
254 +          toposForAtom[i].push_back(j);
255 +          topoDist[i].push_back(1);
256 +        } else {
257 +          if (oneThree->hasPair(iglob, jglob)) {
258 +            toposForAtom[i].push_back(j);
259 +            topoDist[i].push_back(2);
260 +          } else {
261 +            if (oneFour->hasPair(iglob, jglob)) {
262 +              toposForAtom[i].push_back(j);
263 +              topoDist[i].push_back(3);
264 +            }
265 +          }
266 +        }
267 +      }      
268 +    }
269   #endif
270  
271 +    // allocate memory for the parallel objects
272 +    atypesLocal.resize(nLocal_);
273 +
274 +    for (int i = 0; i < nLocal_; i++)
275 +      atypesLocal[i] = ff_->getAtomType(idents[i]);
276 +
277      groupList_.clear();
278 <    groupList_.reserve(nGroups_);
278 >    groupList_.resize(nGroups_);
279      for (int i = 0; i < nGroups_; i++) {
280        int gid = cgLocalToGlobal[i];
281        for (int j = 0; j < nLocal_; j++) {
282          int aid = AtomLocalToGlobal[j];
283 <        if (globalGroupMembership[aid] == gid)
283 >        if (globalGroupMembership[aid] == gid) {
284            groupList_[i].push_back(j);
285 +        }
286        }      
287      }
288  
289 +
290 +    createGtypeCutoffMap();
291 +
292 +  }
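
The Plan<T> objects created above wrap collective communication along one row or one column of the processor grid: each processor contributes its nLocal_ (or nGroups_) entries and receives the concatenated row or column arrays of length nAtomsInRow_, nAtomsInCol_, nGroupsInRow_, or nGroupsInCol_. A rough sketch of what such a gather amounts to, written with the plain MPI C API rather than the MPI::Intracomm bindings this file uses; gatherRowInts is a purely hypothetical helper, not OpenMD's Plan implementation:

    #include <mpi.h>
    #include <numeric>
    #include <vector>

    // Every rank on the row communicator contributes local.size() ints and
    // receives the concatenation from all ranks in that row.
    std::vector<int> gatherRowInts(MPI_Comm row, const std::vector<int>& local) {
      int nProcs;
      MPI_Comm_size(row, &nProcs);

      std::vector<int> counts(nProcs), displs(nProcs, 0);
      int myCount = static_cast<int>(local.size());
      MPI_Allgather(&myCount, 1, MPI_INT, counts.data(), 1, MPI_INT, row);
      std::partial_sum(counts.begin(), counts.end() - 1, displs.begin() + 1);

      std::vector<int> rowData(displs.back() + counts.back());
      MPI_Allgatherv(local.data(), myCount, MPI_INT,
                     rowData.data(), counts.data(), displs.data(), MPI_INT, row);
      return rowData;
    }

Calls such as AtomPlanIntRow->gather(idents, identsRow) above appear to play exactly this role, which is why identsRow and the other row/column arrays are resized to nAtomsInRow_ and nAtomsInCol_ before the gathers.
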
293    
294 <    // still need:
295 <    // topoDist
296 <    // exclude
294 >  void ForceMatrixDecomposition::createGtypeCutoffMap() {
295 >    
296 >    RealType tol = 1e-6;
297 >    largestRcut_ = 0.0;
298 >    RealType rc;
299 >    int atid;
300 >    set<AtomType*> atypes = info_->getSimulatedAtomTypes();
301 >    
302 >    map<int, RealType> atypeCutoff;
303 >      
304 >    for (set<AtomType*>::iterator at = atypes.begin();
305 >         at != atypes.end(); ++at){
306 >      atid = (*at)->getIdent();
307 >      if (userChoseCutoff_)
308 >        atypeCutoff[atid] = userCutoff_;
309 >      else
310 >        atypeCutoff[atid] = interactionMan_->getSuggestedCutoffRadius(*at);
311 >    }
312 >    
313 >    vector<RealType> gTypeCutoffs;
314 >    // first we do a single loop over the cutoff groups to find the
315 >    // largest cutoff for any atypes present in this group.
316 > #ifdef IS_MPI
317 >    vector<RealType> groupCutoffRow(nGroupsInRow_, 0.0);
318 >    groupRowToGtype.resize(nGroupsInRow_);
319 >    for (int cg1 = 0; cg1 < nGroupsInRow_; cg1++) {
320 >      vector<int> atomListRow = getAtomsInGroupRow(cg1);
321 >      for (vector<int>::iterator ia = atomListRow.begin();
322 >           ia != atomListRow.end(); ++ia) {            
323 >        int atom1 = (*ia);
324 >        atid = identsRow[atom1];
325 >        if (atypeCutoff[atid] > groupCutoffRow[cg1]) {
326 >          groupCutoffRow[cg1] = atypeCutoff[atid];
327 >        }
328 >      }
329  
330 +      bool gTypeFound = false;
331 +      for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
332 +        if (abs(groupCutoffRow[cg1] - gTypeCutoffs[gt]) < tol) {
333 +          groupRowToGtype[cg1] = gt;
334 +          gTypeFound = true;
335 +        }
336 +      }
337 +      if (!gTypeFound) {
338 +        gTypeCutoffs.push_back( groupCutoffRow[cg1] );
339 +        groupRowToGtype[cg1] = gTypeCutoffs.size() - 1;
340 +      }
341 +      
342 +    }
343 +    vector<RealType> groupCutoffCol(nGroupsInCol_, 0.0);
344 +    groupColToGtype.resize(nGroupsInCol_);
345 +    for (int cg2 = 0; cg2 < nGroupsInCol_; cg2++) {
346 +      vector<int> atomListCol = getAtomsInGroupColumn(cg2);
347 +      for (vector<int>::iterator jb = atomListCol.begin();
348 +           jb != atomListCol.end(); ++jb) {            
349 +        int atom2 = (*jb);
350 +        atid = identsCol[atom2];
351 +        if (atypeCutoff[atid] > groupCutoffCol[cg2]) {
352 +          groupCutoffCol[cg2] = atypeCutoff[atid];
353 +        }
354 +      }
355 +      bool gTypeFound = false;
356 +      for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
357 +        if (abs(groupCutoffCol[cg2] - gTypeCutoffs[gt]) < tol) {
358 +          groupColToGtype[cg2] = gt;
359 +          gTypeFound = true;
360 +        }
361 +      }
362 +      if (!gTypeFound) {
363 +        gTypeCutoffs.push_back( groupCutoffCol[cg2] );
364 +        groupColToGtype[cg2] = gTypeCutoffs.size() - 1;
365 +      }
366 +    }
367 + #else
368 +
369 +    vector<RealType> groupCutoff(nGroups_, 0.0);
370 +    groupToGtype.resize(nGroups_);
371 +    for (int cg1 = 0; cg1 < nGroups_; cg1++) {
372 +      groupCutoff[cg1] = 0.0;
373 +      vector<int> atomList = getAtomsInGroupRow(cg1);
374 +      for (vector<int>::iterator ia = atomList.begin();
375 +           ia != atomList.end(); ++ia) {            
376 +        int atom1 = (*ia);
377 +        atid = idents[atom1];
378 +        if (atypeCutoff[atid] > groupCutoff[cg1])
379 +          groupCutoff[cg1] = atypeCutoff[atid];
380 +      }
381 +      
382 +      bool gTypeFound = false;
383 +      for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
384 +        if (abs(groupCutoff[cg1] - gTypeCutoffs[gt]) < tol) {
385 +          groupToGtype[cg1] = gt;
386 +          gTypeFound = true;
387 +        }
388 +      }
389 +      if (!gTypeFound) {      
390 +        gTypeCutoffs.push_back( groupCutoff[cg1] );
391 +        groupToGtype[cg1] = gTypeCutoffs.size() - 1;
392 +      }      
393 +    }
394 + #endif
395 +
396 +    // Now we find the maximum group cutoff value present in the simulation
397 +
398 +    RealType groupMax = *max_element(gTypeCutoffs.begin(),
399 +                                     gTypeCutoffs.end());
400 +
401 + #ifdef IS_MPI
402 +    MPI::COMM_WORLD.Allreduce(&groupMax, &groupMax, 1, MPI::REALTYPE,
403 +                              MPI::MAX);
404 + #endif
405 +    
406 +    RealType tradRcut = groupMax;
407 +
408 +    for (int i = 0; i < gTypeCutoffs.size();  i++) {
409 +      for (int j = 0; j < gTypeCutoffs.size();  j++) {      
410 +        RealType thisRcut;
411 +        switch(cutoffPolicy_) {
412 +        case TRADITIONAL:
413 +          thisRcut = tradRcut;
414 +          break;
415 +        case MIX:
416 +          thisRcut = 0.5 * (gTypeCutoffs[i] + gTypeCutoffs[j]);
417 +          break;
418 +        case MAX:
419 +          thisRcut = max(gTypeCutoffs[i], gTypeCutoffs[j]);
420 +          break;
421 +        default:
422 +          sprintf(painCave.errMsg,
423 +                  "ForceMatrixDecomposition::createGtypeCutoffMap "
424 +                  "hit an unknown cutoff policy!\n");
425 +          painCave.severity = OPENMD_ERROR;
426 +          painCave.isFatal = 1;
427 +          simError();
428 +          break;
429 +        }
430 +
431 +        pair<int,int> key = make_pair(i,j);
432 +        gTypeCutoffMap[key].first = thisRcut;
433 +        if (thisRcut > largestRcut_) largestRcut_ = thisRcut;
434 +        gTypeCutoffMap[key].second = thisRcut*thisRcut;
435 +        gTypeCutoffMap[key].third = pow(thisRcut + skinThickness_, 2);
436 +        // sanity check
437 +        
438 +        if (userChoseCutoff_) {
439 +          if (abs(gTypeCutoffMap[key].first - userCutoff_) > 0.0001) {
440 +            sprintf(painCave.errMsg,
441 +                    "ForceMatrixDecomposition::createGtypeCutoffMap "
442 +                    "user-specified rCut (%lf) does not match computed group Cutoff\n", userCutoff_);
443 +            painCave.severity = OPENMD_ERROR;
444 +            painCave.isFatal = 1;
445 +            simError();            
446 +          }
447 +        }
448 +      }
449 +    }
450    }
451 +
452 +
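
A worked example of the three cutoff policies above: if the simulation contains group types with cutoffs of 9.0 and 12.0, the mixed (i, j) pair gets thisRcut = 12.0 under TRADITIONAL (every pair uses the global maximum, tradRcut), 0.5 * (9.0 + 12.0) = 10.5 under MIX, and max(9.0, 12.0) = 12.0 under MAX. Each gTypeCutoffMap entry then stores that radius, its square, and (thisRcut + skinThickness_)^2; the last of these, returned through getGroupCutoffs() as the .third member, is the squared list radius that buildNeighborList() compares squared pair separations against.
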
453 +  groupCutoffs ForceMatrixDecomposition::getGroupCutoffs(int cg1, int cg2) {
454 +    int i, j;  
455 + #ifdef IS_MPI
456 +    i = groupRowToGtype[cg1];
457 +    j = groupColToGtype[cg2];
458 + #else
459 +    i = groupToGtype[cg1];
460 +    j = groupToGtype[cg2];
461 + #endif    
462 +    return gTypeCutoffMap[make_pair(i,j)];
463 +  }
464 +
465 +  int ForceMatrixDecomposition::getTopologicalDistance(int atom1, int atom2) {
466 +    for (int j = 0; j < toposForAtom[atom1].size(); j++) {
467 +      if (toposForAtom[atom1][j] == atom2)
468 +        return topoDist[atom1][j];
469 +    }
470 +    return 0;
471 +  }
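
getTopologicalDistance() returns 1, 2, or 3 for 1-2, 1-3, and 1-4 bonded neighbors and 0 for a topologically unrelated pair. A hypothetical caller might translate those values into short-range scaling factors; the numbers below are placeholders for illustration, not values taken from OpenMD:

    // Placeholder scale factors keyed on topological distance; a real force
    // field supplies its own 1-2/1-3/1-4 scaling.
    RealType shortRangeScale(int topoDistance) {
      switch (topoDistance) {
      case 1:  return 0.0;   // directly bonded (1-2) pair
      case 2:  return 0.0;   // 1-3 pair
      case 3:  return 0.5;   // 1-4 pair, often only partially scaled
      default: return 1.0;   // unrelated pair (returned distance of 0)
      }
    }
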
472 +
473 +  void ForceMatrixDecomposition::zeroWorkArrays() {
474 +    pairwisePot = 0.0;
475 +    embeddingPot = 0.0;
476 +
477 + #ifdef IS_MPI
478 +    if (storageLayout_ & DataStorage::dslForce) {
479 +      fill(atomRowData.force.begin(), atomRowData.force.end(), V3Zero);
480 +      fill(atomColData.force.begin(), atomColData.force.end(), V3Zero);
481 +    }
482 +
483 +    if (storageLayout_ & DataStorage::dslTorque) {
484 +      fill(atomRowData.torque.begin(), atomRowData.torque.end(), V3Zero);
485 +      fill(atomColData.torque.begin(), atomColData.torque.end(), V3Zero);
486 +    }
487      
488 +    fill(pot_row.begin(), pot_row.end(),
489 +         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
490  
491 +    fill(pot_col.begin(), pot_col.end(),
492 +         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));  
493  
494 +    if (storageLayout_ & DataStorage::dslParticlePot) {    
495 +      fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(),
496 +           0.0);
497 +      fill(atomColData.particlePot.begin(), atomColData.particlePot.end(),
498 +           0.0);
499 +    }
500 +
501 +    if (storageLayout_ & DataStorage::dslDensity) {      
502 +      fill(atomRowData.density.begin(), atomRowData.density.end(), 0.0);
503 +      fill(atomColData.density.begin(), atomColData.density.end(), 0.0);
504 +    }
505 +
506 +    if (storageLayout_ & DataStorage::dslFunctional) {  
507 +      fill(atomRowData.functional.begin(), atomRowData.functional.end(),
508 +           0.0);
509 +      fill(atomColData.functional.begin(), atomColData.functional.end(),
510 +           0.0);
511 +    }
512 +
513 +    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {      
514 +      fill(atomRowData.functionalDerivative.begin(),
515 +           atomRowData.functionalDerivative.end(), 0.0);
516 +      fill(atomColData.functionalDerivative.begin(),
517 +           atomColData.functionalDerivative.end(), 0.0);
518 +    }
519 +
520 +    if (storageLayout_ & DataStorage::dslSkippedCharge) {      
521 +      fill(atomRowData.skippedCharge.begin(),
522 +           atomRowData.skippedCharge.end(), 0.0);
523 +      fill(atomColData.skippedCharge.begin(),
524 +           atomColData.skippedCharge.end(), 0.0);
525 +    }
526 +
527 + #endif
528 +    // even in parallel, we need to zero out the local arrays:
529 +
530 +    if (storageLayout_ & DataStorage::dslParticlePot) {      
531 +      fill(snap_->atomData.particlePot.begin(),
532 +           snap_->atomData.particlePot.end(), 0.0);
533 +    }
534 +    
535 +    if (storageLayout_ & DataStorage::dslDensity) {      
536 +      fill(snap_->atomData.density.begin(),
537 +           snap_->atomData.density.end(), 0.0);
538 +    }
539 +    if (storageLayout_ & DataStorage::dslFunctional) {
540 +      fill(snap_->atomData.functional.begin(),
541 +           snap_->atomData.functional.end(), 0.0);
542 +    }
543 +    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {      
544 +      fill(snap_->atomData.functionalDerivative.begin(),
545 +           snap_->atomData.functionalDerivative.end(), 0.0);
546 +    }
547 +    if (storageLayout_ & DataStorage::dslSkippedCharge) {      
548 +      fill(snap_->atomData.skippedCharge.begin(),
549 +           snap_->atomData.skippedCharge.end(), 0.0);
550 +    }
551 +    
552 +  }
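
The storageLayout_ tests in zeroWorkArrays() treat the DataStorage::dsl* constants as independent bits, so only the optional arrays a given simulation actually allocates get cleared. A small illustration with made-up flag values (the real constants are defined in the DataStorage headers, not here):

    // Hypothetical flag values; only the bitwise pattern matters.
    const int dslForce   = 1 << 0;
    const int dslTorque  = 1 << 1;
    const int dslDensity = 1 << 2;

    int layout = dslForce | dslDensity;
    if (layout & dslForce)   { /* force array exists: zero it */ }
    if (layout & dslTorque)  { /* bit not set: nothing to clear */ }
    if (layout & dslDensity) { /* density array exists: zero it */ }
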
553 +
554 +
555    void ForceMatrixDecomposition::distributeData()  {
556      snap_ = sman_->getCurrentSnapshot();
557      storageLayout_ = sman_->getStorageLayout();
558   #ifdef IS_MPI
559      
560      // gather up the atomic positions
561 <    AtomCommVectorRow->gather(snap_->atomData.position,
561 >    AtomPlanVectorRow->gather(snap_->atomData.position,
562                                atomRowData.position);
563 <    AtomCommVectorColumn->gather(snap_->atomData.position,
563 >    AtomPlanVectorColumn->gather(snap_->atomData.position,
564                                   atomColData.position);
565      
566      // gather up the cutoff group positions
567 <    cgCommVectorRow->gather(snap_->cgData.position,
567 >
568 >    cgPlanVectorRow->gather(snap_->cgData.position,
569                              cgRowData.position);
570 <    cgCommVectorColumn->gather(snap_->cgData.position,
570 >
571 >    cgPlanVectorColumn->gather(snap_->cgData.position,
572                                 cgColData.position);
573 +
574      
575      // if needed, gather the atomic rotation matrices
576      if (storageLayout_ & DataStorage::dslAmat) {
577 <      AtomCommMatrixRow->gather(snap_->atomData.aMat,
577 >      AtomPlanMatrixRow->gather(snap_->atomData.aMat,
578                                  atomRowData.aMat);
579 <      AtomCommMatrixColumn->gather(snap_->atomData.aMat,
579 >      AtomPlanMatrixColumn->gather(snap_->atomData.aMat,
580                                     atomColData.aMat);
581      }
582      
583      // if needed, gather the atomic electrostatic frames
584      if (storageLayout_ & DataStorage::dslElectroFrame) {
585 <      AtomCommMatrixRow->gather(snap_->atomData.electroFrame,
585 >      AtomPlanMatrixRow->gather(snap_->atomData.electroFrame,
586                                  atomRowData.electroFrame);
587 <      AtomCommMatrixColumn->gather(snap_->atomData.electroFrame,
587 >      AtomPlanMatrixColumn->gather(snap_->atomData.electroFrame,
588                                     atomColData.electroFrame);
589      }
590 +
591   #endif      
592    }
593    
594 +  /* collects information obtained during the pre-pair loop onto local
595 +   * data structures.
596 +   */
597    void ForceMatrixDecomposition::collectIntermediateData() {
598      snap_ = sman_->getCurrentSnapshot();
599      storageLayout_ = sman_->getStorageLayout();
# Line 203 | Line 601 | namespace OpenMD {
601      
602      if (storageLayout_ & DataStorage::dslDensity) {
603        
604 <      AtomCommRealRow->scatter(atomRowData.density,
604 >      AtomPlanRealRow->scatter(atomRowData.density,
605                                 snap_->atomData.density);
606        
607        int n = snap_->atomData.density.size();
608 <      std::vector<RealType> rho_tmp(n, 0.0);
609 <      AtomCommRealColumn->scatter(atomColData.density, rho_tmp);
608 >      vector<RealType> rho_tmp(n, 0.0);
609 >      AtomPlanRealColumn->scatter(atomColData.density, rho_tmp);
610        for (int i = 0; i < n; i++)
611          snap_->atomData.density[i] += rho_tmp[i];
612      }
613   #endif
614    }
615 <  
615 >
616 >  /*
617 >   * redistributes information obtained during the pre-pair loop out to
618 >   * row and column-indexed data structures
619 >   */
620    void ForceMatrixDecomposition::distributeIntermediateData() {
621      snap_ = sman_->getCurrentSnapshot();
622      storageLayout_ = sman_->getStorageLayout();
623   #ifdef IS_MPI
624      if (storageLayout_ & DataStorage::dslFunctional) {
625 <      AtomCommRealRow->gather(snap_->atomData.functional,
625 >      AtomPlanRealRow->gather(snap_->atomData.functional,
626                                atomRowData.functional);
627 <      AtomCommRealColumn->gather(snap_->atomData.functional,
627 >      AtomPlanRealColumn->gather(snap_->atomData.functional,
628                                   atomColData.functional);
629      }
630      
631      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
632 <      AtomCommRealRow->gather(snap_->atomData.functionalDerivative,
632 >      AtomPlanRealRow->gather(snap_->atomData.functionalDerivative,
633                                atomRowData.functionalDerivative);
634 <      AtomCommRealColumn->gather(snap_->atomData.functionalDerivative,
634 >      AtomPlanRealColumn->gather(snap_->atomData.functionalDerivative,
635                                   atomColData.functionalDerivative);
636      }
637   #endif
# Line 243 | Line 645 | namespace OpenMD {
645      int n = snap_->atomData.force.size();
646      vector<Vector3d> frc_tmp(n, V3Zero);
647      
648 <    AtomCommVectorRow->scatter(atomRowData.force, frc_tmp);
648 >    AtomPlanVectorRow->scatter(atomRowData.force, frc_tmp);
649      for (int i = 0; i < n; i++) {
650        snap_->atomData.force[i] += frc_tmp[i];
651        frc_tmp[i] = 0.0;
652      }
653      
654 <    AtomCommVectorColumn->scatter(atomColData.force, frc_tmp);
655 <    for (int i = 0; i < n; i++)
654 >    AtomPlanVectorColumn->scatter(atomColData.force, frc_tmp);
655 >    for (int i = 0; i < n; i++) {
656        snap_->atomData.force[i] += frc_tmp[i];
657 <    
658 <    
657 >    }
658 >        
659      if (storageLayout_ & DataStorage::dslTorque) {
660  
661 <      int nt = snap_->atomData.force.size();
661 >      int nt = snap_->atomData.torque.size();
662        vector<Vector3d> trq_tmp(nt, V3Zero);
663  
664 <      AtomCommVectorRow->scatter(atomRowData.torque, trq_tmp);
665 <      for (int i = 0; i < n; i++) {
664 >      AtomPlanVectorRow->scatter(atomRowData.torque, trq_tmp);
665 >      for (int i = 0; i < nt; i++) {
666          snap_->atomData.torque[i] += trq_tmp[i];
667          trq_tmp[i] = 0.0;
668        }
669        
670 <      AtomCommVectorColumn->scatter(atomColData.torque, trq_tmp);
671 <      for (int i = 0; i < n; i++)
670 >      AtomPlanVectorColumn->scatter(atomColData.torque, trq_tmp);
671 >      for (int i = 0; i < nt; i++)
672          snap_->atomData.torque[i] += trq_tmp[i];
673      }
674 +
675 +    if (storageLayout_ & DataStorage::dslSkippedCharge) {
676 +
677 +      int ns = snap_->atomData.skippedCharge.size();
678 +      vector<RealType> skch_tmp(ns, 0.0);
679 +
680 +      AtomPlanRealRow->scatter(atomRowData.skippedCharge, skch_tmp);
681 +      for (int i = 0; i < ns; i++) {
682 +        snap_->atomData.skippedCharge[i] += skch_tmp[i];
683 +        skch_tmp[i] = 0.0;
684 +      }
685 +      
686 +      AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp);
687 +      for (int i = 0; i < ns; i++)
688 +        snap_->atomData.skippedCharge[i] += skch_tmp[i];
689 +            
690 +    }
691      
692      nLocal_ = snap_->getNumberOfAtoms();
693  
694 <    vector<vector<RealType> > pot_temp(N_INTERACTION_FAMILIES,
695 <                                       vector<RealType> (nLocal_, 0.0));
694 >    vector<potVec> pot_temp(nLocal_,
695 >                            Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
696 >
697 >    // scatter/gather pot_row into the members of my column
698 >          
699 >    AtomPlanPotRow->scatter(pot_row, pot_temp);
700 >
701 >    for (int ii = 0;  ii < pot_temp.size(); ii++ )
702 >      pairwisePot += pot_temp[ii];
703      
704 <    for (int i = 0; i < N_INTERACTION_FAMILIES; i++) {
705 <      AtomCommRealRow->scatter(pot_row[i], pot_temp[i]);
706 <      for (int ii = 0;  ii < pot_temp[i].size(); ii++ ) {
707 <        pot_local[i] += pot_temp[i][ii];
708 <      }
704 >    fill(pot_temp.begin(), pot_temp.end(),
705 >         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
706 >      
707 >    AtomPlanPotColumn->scatter(pot_col, pot_temp);    
708 >    
709 >    for (int ii = 0;  ii < pot_temp.size(); ii++ )
710 >      pairwisePot += pot_temp[ii];    
711 >    
712 >    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
713 >      RealType ploc1 = pairwisePot[ii];
714 >      RealType ploc2 = 0.0;
715 >      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
716 >      pairwisePot[ii] = ploc2;
717      }
718 +
719 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
720 +      RealType ploc1 = embeddingPot[ii];
721 +      RealType ploc2 = 0.0;
722 +      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
723 +      embeddingPot[ii] = ploc2;
724 +    }
725 +
726   #endif
727 +
728    }
729  
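
The two loops at the end of collectData() reduce pairwisePot and embeddingPot one interaction family at a time through a temporary send/receive pair. For reference only, the same global sum can be expressed as a single in-place reduction over all families with the plain MPI C API (MPI_DOUBLE standing in for whatever MPI::REALTYPE maps to in this build); this is an illustrative sketch, not the code the revision uses:

    #include <mpi.h>

    // Sum families[0..nFamilies) across all processors, leaving the global
    // totals in the same buffer on every rank.
    void sumFamiliesAcrossProcessors(double* families, int nFamilies) {
      MPI_Allreduce(MPI_IN_PLACE, families, nFamilies,
                    MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    }
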
730 +  int ForceMatrixDecomposition::getNAtomsInRow() {  
731 + #ifdef IS_MPI
732 +    return nAtomsInRow_;
733 + #else
734 +    return nLocal_;
735 + #endif
736 +  }
737 +
738    /**
739     * returns the list of atoms belonging to this group.  
740     */
# Line 348 | Line 799 | namespace OpenMD {
799   #ifdef IS_MPI
800      return massFactorsRow[atom1];
801   #else
802 <    return massFactorsLocal[atom1];
802 >    return massFactors[atom1];
803   #endif
804    }
805  
# Line 356 | Line 807 | namespace OpenMD {
807   #ifdef IS_MPI
808      return massFactorsCol[atom2];
809   #else
810 <    return massFactorsLocal[atom2];
810 >    return massFactors[atom2];
811   #endif
812  
813    }
# Line 374 | Line 825 | namespace OpenMD {
825      return d;    
826    }
827  
828 +  vector<int> ForceMatrixDecomposition::getExcludesForAtom(int atom1) {
829 +    return excludesForAtom[atom1];
830 +  }
831 +
832 +  /**
833 +   * We need to exclude some overcounted interactions that result from
834 +   * the parallel decomposition.
835 +   */
836 +  bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) {
837 +    int unique_id_1, unique_id_2;
838 +        
839 + #ifdef IS_MPI
840 +    // in MPI, we have to look up the unique IDs for each atom
841 +    unique_id_1 = AtomRowToGlobal[atom1];
842 +    unique_id_2 = AtomColToGlobal[atom2];
843 + #else
844 +    unique_id_1 = AtomLocalToGlobal[atom1];
845 +    unique_id_2 = AtomLocalToGlobal[atom2];
846 + #endif  
847 +
848 +    if (unique_id_1 == unique_id_2) return true;
849 +
850 + #ifdef IS_MPI
851 +    // this prevents us from doing the pair on multiple processors
852 +    if (unique_id_1 < unique_id_2) {
853 +      if ((unique_id_1 + unique_id_2) % 2 == 0) return true;
854 +    } else {
855 +      if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
856 +    }
857 + #endif
858 +    
859 +    return false;
860 +  }
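
A worked example of the parity rule in skipAtomPair(): take a pair whose global ids are 7 and 12, so their sum, 19, is odd. Where 7 is the row atom and 12 the column atom, unique_id_1 < unique_id_2 and the pair is skipped only for an even sum, so it is kept. Where 12 is the row atom and 7 the column atom, the pair is skipped because the sum is odd. Exactly one of the two (row, column) orderings survives, so each physical pair is evaluated once even though it appears in two blocks of the force matrix.
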
861 +
862 +  /**
863 +   * We need to handle the interactions for atoms who are involved in
864 +   * the same rigid body as well as some short range interactions
865 +   * (bonds, bends, torsions) differently from other interactions.
866 +   * We'll still visit the pairwise routines, but with a flag that
867 +   * tells those routines to exclude the pair from direct long range
868 +   * interactions.  Some indirect interactions (notably reaction
869 +   * field) must still be handled for these pairs.
870 +   */
871 +  bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) {
872 +
873 +    // excludesForAtom was constructed to use row/column indices in the MPI
874 +    // version, and to use local IDs in the non-MPI version:
875 +    
876 +    for (vector<int>::iterator i = excludesForAtom[atom1].begin();
877 +         i != excludesForAtom[atom1].end(); ++i) {
878 +      if ( (*i) == atom2 ) return true;
879 +    }
880 +
881 +    return false;
882 +  }
883 +
884 +
885    void ForceMatrixDecomposition::addForceToAtomRow(int atom1, Vector3d fg){
886   #ifdef IS_MPI
887      atomRowData.force[atom1] += fg;
# Line 391 | Line 899 | namespace OpenMD {
899    }
900  
901      // filling interaction blocks with pointers
902 <  InteractionData ForceMatrixDecomposition::fillInteractionData(int atom1, int atom2) {    
903 <    InteractionData idat;
902 >  void ForceMatrixDecomposition::fillInteractionData(InteractionData &idat,
903 >                                                     int atom1, int atom2) {
904  
905 +    idat.excluded = excludeAtomPair(atom1, atom2);
906 +  
907   #ifdef IS_MPI
908 +    idat.atypes = make_pair( atypesRow[atom1], atypesCol[atom2]);
909 +    //idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]),
910 +    //                         ff_->getAtomType(identsCol[atom2]) );
911 +    
912      if (storageLayout_ & DataStorage::dslAmat) {
913        idat.A1 = &(atomRowData.aMat[atom1]);
914        idat.A2 = &(atomColData.aMat[atom2]);
# Line 415 | Line 929 | namespace OpenMD {
929        idat.rho2 = &(atomColData.density[atom2]);
930      }
931  
932 +    if (storageLayout_ & DataStorage::dslFunctional) {
933 +      idat.frho1 = &(atomRowData.functional[atom1]);
934 +      idat.frho2 = &(atomColData.functional[atom2]);
935 +    }
936 +
937      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
938        idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
939        idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
940      }
941 +
942 +    if (storageLayout_ & DataStorage::dslParticlePot) {
943 +      idat.particlePot1 = &(atomRowData.particlePot[atom1]);
944 +      idat.particlePot2 = &(atomColData.particlePot[atom2]);
945 +    }
946 +
947 +    if (storageLayout_ & DataStorage::dslSkippedCharge) {              
948 +      idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
949 +      idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
950 +    }
951 +
952   #else
953 +
954 +    idat.atypes = make_pair( atypesLocal[atom1], atypesLocal[atom2]);
955 +    //idat.atypes = make_pair( ff_->getAtomType(idents[atom1]),
956 +    //                         ff_->getAtomType(idents[atom2]) );
957 +
958      if (storageLayout_ & DataStorage::dslAmat) {
959        idat.A1 = &(snap_->atomData.aMat[atom1]);
960        idat.A2 = &(snap_->atomData.aMat[atom2]);
# Line 435 | Line 970 | namespace OpenMD {
970        idat.t2 = &(snap_->atomData.torque[atom2]);
971      }
972  
973 <    if (storageLayout_ & DataStorage::dslDensity) {
973 >    if (storageLayout_ & DataStorage::dslDensity) {    
974        idat.rho1 = &(snap_->atomData.density[atom1]);
975        idat.rho2 = &(snap_->atomData.density[atom2]);
976      }
977  
978 +    if (storageLayout_ & DataStorage::dslFunctional) {
979 +      idat.frho1 = &(snap_->atomData.functional[atom1]);
980 +      idat.frho2 = &(snap_->atomData.functional[atom2]);
981 +    }
982 +
983      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
984        idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
985        idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
986      }
987 +
988 +    if (storageLayout_ & DataStorage::dslParticlePot) {
989 +      idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
990 +      idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
991 +    }
992 +
993 +    if (storageLayout_ & DataStorage::dslSkippedCharge) {
994 +      idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
995 +      idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
996 +    }
997   #endif
448    return idat;
998    }
999  
1000 <  InteractionData ForceMatrixDecomposition::fillSkipData(int atom1, int atom2){
1001 <
453 <    InteractionData idat;
1000 >  
1001 >  void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) {    
1002   #ifdef IS_MPI
1003 <    if (storageLayout_ & DataStorage::dslElectroFrame) {
1004 <      idat.eFrame1 = &(atomRowData.electroFrame[atom1]);
1005 <      idat.eFrame2 = &(atomColData.electroFrame[atom2]);
1006 <    }
1007 <    if (storageLayout_ & DataStorage::dslTorque) {
460 <      idat.t1 = &(atomRowData.torque[atom1]);
461 <      idat.t2 = &(atomColData.torque[atom2]);
462 <    }
463 <    if (storageLayout_ & DataStorage::dslForce) {
464 <      idat.t1 = &(atomRowData.force[atom1]);
465 <      idat.t2 = &(atomColData.force[atom2]);
466 <    }
1003 >    pot_row[atom1] += 0.5 *  *(idat.pot);
1004 >    pot_col[atom2] += 0.5 *  *(idat.pot);
1005 >
1006 >    atomRowData.force[atom1] += *(idat.f1);
1007 >    atomColData.force[atom2] -= *(idat.f1);
1008   #else
1009 <    if (storageLayout_ & DataStorage::dslElectroFrame) {
1010 <      idat.eFrame1 = &(snap_->atomData.electroFrame[atom1]);
1011 <      idat.eFrame2 = &(snap_->atomData.electroFrame[atom2]);
1012 <    }
472 <    if (storageLayout_ & DataStorage::dslTorque) {
473 <      idat.t1 = &(snap_->atomData.torque[atom1]);
474 <      idat.t2 = &(snap_->atomData.torque[atom2]);
475 <    }
476 <    if (storageLayout_ & DataStorage::dslForce) {
477 <      idat.t1 = &(snap_->atomData.force[atom1]);
478 <      idat.t2 = &(snap_->atomData.force[atom2]);
479 <    }
1009 >    pairwisePot += *(idat.pot);
1010 >
1011 >    snap_->atomData.force[atom1] += *(idat.f1);
1012 >    snap_->atomData.force[atom2] -= *(idat.f1);
1013   #endif
1014      
1015    }
1016  
484
485
486
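
The parallel branch of unpackInteractionData() above splits each pair's potential evenly: a pair contributing 2.0 adds 1.0 to pot_row[atom1] and 1.0 to pot_col[atom2], while the full force f1 is added to the row atom and subtracted from the column atom (Newton's third law). When collectData() later scatters pot_row and pot_col back to the processors that own those atoms, the two halves (possibly landing on different processors) add back up to the full 2.0 in the global sum, so every pair is counted exactly once after the final Allreduce.
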
1017    /*
1018     * buildNeighborList
1019     *
# Line 493 | Line 1023 | namespace OpenMD {
1023    vector<pair<int, int> > ForceMatrixDecomposition::buildNeighborList() {
1024        
1025      vector<pair<int, int> > neighborList;
1026 +    groupCutoffs cuts;
1027 +    bool doAllPairs = false;
1028 +
1029   #ifdef IS_MPI
1030      cellListRow_.clear();
1031      cellListCol_.clear();
# Line 500 | Line 1033 | namespace OpenMD {
1033      cellList_.clear();
1034   #endif
1035  
1036 <    // dangerous to not do error checking.
504 <    RealType rCut_;
505 <
506 <    RealType rList_ = (rCut_ + skinThickness_);
1036 >    RealType rList_ = (largestRcut_ + skinThickness_);
1037      RealType rl2 = rList_ * rList_;
1038      Snapshot* snap_ = sman_->getCurrentSnapshot();
1039      Mat3x3d Hmat = snap_->getHmat();
# Line 515 | Line 1045 | namespace OpenMD {
1045      nCells_.y() = (int) ( Hy.length() )/ rList_;
1046      nCells_.z() = (int) ( Hz.length() )/ rList_;
1047  
1048 +    // handle small boxes where the cell offsets can end up repeating cells
1049 +    
1050 +    if (nCells_.x() < 3) doAllPairs = true;
1051 +    if (nCells_.y() < 3) doAllPairs = true;
1052 +    if (nCells_.z() < 3) doAllPairs = true;
1053 +
1054      Mat3x3d invHmat = snap_->getInvHmat();
1055      Vector3d rs, scaled, dr;
1056      Vector3i whichCell;
1057      int cellIndex;
1058 +    int nCtot = nCells_.x() * nCells_.y() * nCells_.z();
1059  
1060   #ifdef IS_MPI
1061 <    for (int i = 0; i < nGroupsInRow_; i++) {
1062 <      rs = cgRowData.position[i];
1063 <      // scaled positions relative to the box vectors
1064 <      scaled = invHmat * rs;
1065 <      // wrap the vector back into the unit box by subtracting integer box
529 <      // numbers
530 <      for (int j = 0; j < 3; j++)
531 <        scaled[j] -= roundMe(scaled[j]);
532 <    
533 <      // find xyz-indices of cell that cutoffGroup is in.
534 <      whichCell.x() = nCells_.x() * scaled.x();
535 <      whichCell.y() = nCells_.y() * scaled.y();
536 <      whichCell.z() = nCells_.z() * scaled.z();
1061 >    cellListRow_.resize(nCtot);
1062 >    cellListCol_.resize(nCtot);
1063 > #else
1064 >    cellList_.resize(nCtot);
1065 > #endif
1066  
1067 <      // find single index of this cell:
1068 <      cellIndex = Vlinear(whichCell, nCells_);
540 <      // add this cutoff group to the list of groups in this cell;
541 <      cellListRow_[cellIndex].push_back(i);
542 <    }
543 <
544 <    for (int i = 0; i < nGroupsInCol_; i++) {
545 <      rs = cgColData.position[i];
546 <      // scaled positions relative to the box vectors
547 <      scaled = invHmat * rs;
548 <      // wrap the vector back into the unit box by subtracting integer box
549 <      // numbers
550 <      for (int j = 0; j < 3; j++)
551 <        scaled[j] -= roundMe(scaled[j]);
1067 >    if (!doAllPairs) {
1068 > #ifdef IS_MPI
1069  
1070 <      // find xyz-indices of cell that cutoffGroup is in.
1071 <      whichCell.x() = nCells_.x() * scaled.x();
1072 <      whichCell.y() = nCells_.y() * scaled.y();
1073 <      whichCell.z() = nCells_.z() * scaled.z();
1074 <
1075 <      // find single index of this cell:
1076 <      cellIndex = Vlinear(whichCell, nCells_);
1077 <      // add this cutoff group to the list of groups in this cell;
1078 <      cellListCol_[cellIndex].push_back(i);
1079 <    }
1070 >      for (int i = 0; i < nGroupsInRow_; i++) {
1071 >        rs = cgRowData.position[i];
1072 >        
1073 >        // scaled positions relative to the box vectors
1074 >        scaled = invHmat * rs;
1075 >        
1076 >        // wrap the vector back into the unit box by subtracting integer box
1077 >        // numbers
1078 >        for (int j = 0; j < 3; j++) {
1079 >          scaled[j] -= roundMe(scaled[j]);
1080 >          scaled[j] += 0.5;
1081 >        }
1082 >        
1083 >        // find xyz-indices of cell that cutoffGroup is in.
1084 >        whichCell.x() = nCells_.x() * scaled.x();
1085 >        whichCell.y() = nCells_.y() * scaled.y();
1086 >        whichCell.z() = nCells_.z() * scaled.z();
1087 >        
1088 >        // find single index of this cell:
1089 >        cellIndex = Vlinear(whichCell, nCells_);
1090 >        
1091 >        // add this cutoff group to the list of groups in this cell;
1092 >        cellListRow_[cellIndex].push_back(i);
1093 >      }
1094 >      for (int i = 0; i < nGroupsInCol_; i++) {
1095 >        rs = cgColData.position[i];
1096 >        
1097 >        // scaled positions relative to the box vectors
1098 >        scaled = invHmat * rs;
1099 >        
1100 >        // wrap the vector back into the unit box by subtracting integer box
1101 >        // numbers
1102 >        for (int j = 0; j < 3; j++) {
1103 >          scaled[j] -= roundMe(scaled[j]);
1104 >          scaled[j] += 0.5;
1105 >        }
1106 >        
1107 >        // find xyz-indices of cell that cutoffGroup is in.
1108 >        whichCell.x() = nCells_.x() * scaled.x();
1109 >        whichCell.y() = nCells_.y() * scaled.y();
1110 >        whichCell.z() = nCells_.z() * scaled.z();
1111 >        
1112 >        // find single index of this cell:
1113 >        cellIndex = Vlinear(whichCell, nCells_);
1114 >        
1115 >        // add this cutoff group to the list of groups in this cell;
1116 >        cellListCol_[cellIndex].push_back(i);
1117 >      }
1118 >    
1119   #else
1120 <    for (int i = 0; i < nGroups_; i++) {
1121 <      rs = snap_->cgData.position[i];
1122 <      // scaled positions relative to the box vectors
1123 <      scaled = invHmat * rs;
1124 <      // wrap the vector back into the unit box by subtracting integer box
1125 <      // numbers
1126 <      for (int j = 0; j < 3; j++)
1127 <        scaled[j] -= roundMe(scaled[j]);
1120 >      for (int i = 0; i < nGroups_; i++) {
1121 >        rs = snap_->cgData.position[i];
1122 >        
1123 >        // scaled positions relative to the box vectors
1124 >        scaled = invHmat * rs;
1125 >        
1126 >        // wrap the vector back into the unit box by subtracting integer box
1127 >        // numbers
1128 >        for (int j = 0; j < 3; j++) {
1129 >          scaled[j] -= roundMe(scaled[j]);
1130 >          scaled[j] += 0.5;
1131 >        }
1132 >        
1133 >        // find xyz-indices of cell that cutoffGroup is in.
1134 >        whichCell.x() = nCells_.x() * scaled.x();
1135 >        whichCell.y() = nCells_.y() * scaled.y();
1136 >        whichCell.z() = nCells_.z() * scaled.z();
1137 >        
1138 >        // find single index of this cell:
1139 >        cellIndex = Vlinear(whichCell, nCells_);
1140 >        
1141 >        // add this cutoff group to the list of groups in this cell;
1142 >        cellList_[cellIndex].push_back(i);
1143 >      }
1144  
573      // find xyz-indices of cell that cutoffGroup is in.
574      whichCell.x() = nCells_.x() * scaled.x();
575      whichCell.y() = nCells_.y() * scaled.y();
576      whichCell.z() = nCells_.z() * scaled.z();
577
578      // find single index of this cell:
579      cellIndex = Vlinear(whichCell, nCells_);
580      // add this cutoff group to the list of groups in this cell;
581      cellList_[cellIndex].push_back(i);
582    }
1145   #endif
1146  
1147 <
1148 <
1149 <    for (int m1z = 0; m1z < nCells_.z(); m1z++) {
1150 <      for (int m1y = 0; m1y < nCells_.y(); m1y++) {
1151 <        for (int m1x = 0; m1x < nCells_.x(); m1x++) {
590 <          Vector3i m1v(m1x, m1y, m1z);
591 <          int m1 = Vlinear(m1v, nCells_);
592 <
593 <          for (vector<Vector3i>::iterator os = cellOffsets_.begin();
594 <               os != cellOffsets_.end(); ++os) {
1147 >      for (int m1z = 0; m1z < nCells_.z(); m1z++) {
1148 >        for (int m1y = 0; m1y < nCells_.y(); m1y++) {
1149 >          for (int m1x = 0; m1x < nCells_.x(); m1x++) {
1150 >            Vector3i m1v(m1x, m1y, m1z);
1151 >            int m1 = Vlinear(m1v, nCells_);
1152              
1153 <            Vector3i m2v = m1v + (*os);
1154 <            
1155 <            if (m2v.x() >= nCells_.x()) {
1156 <              m2v.x() = 0;          
1157 <            } else if (m2v.x() < 0) {
601 <              m2v.x() = nCells_.x() - 1;
602 <            }
603 <            
604 <            if (m2v.y() >= nCells_.y()) {
605 <              m2v.y() = 0;          
606 <            } else if (m2v.y() < 0) {
607 <              m2v.y() = nCells_.y() - 1;
608 <            }
609 <            
610 <            if (m2v.z() >= nCells_.z()) {
611 <              m2v.z() = 0;          
612 <            } else if (m2v.z() < 0) {
613 <              m2v.z() = nCells_.z() - 1;
614 <            }
615 <            
616 <            int m2 = Vlinear (m2v, nCells_);
1153 >            for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1154 >                 os != cellOffsets_.end(); ++os) {
1155 >              
1156 >              Vector3i m2v = m1v + (*os);
1157 >            
1158  
1159 < #ifdef IS_MPI
1160 <            for (vector<int>::iterator j1 = cellListRow_[m1].begin();
1161 <                 j1 != cellListRow_[m1].end(); ++j1) {
1162 <              for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1163 <                   j2 != cellListCol_[m2].end(); ++j2) {
1164 <                              
1165 <                // Always do this if we're in different cells or if
1166 <                // we're in the same cell and the global index of the
1167 <                // j2 cutoff group is less than the j1 cutoff group
1159 >              if (m2v.x() >= nCells_.x()) {
1160 >                m2v.x() = 0;          
1161 >              } else if (m2v.x() < 0) {
1162 >                m2v.x() = nCells_.x() - 1;
1163 >              }
1164 >              
1165 >              if (m2v.y() >= nCells_.y()) {
1166 >                m2v.y() = 0;          
1167 >              } else if (m2v.y() < 0) {
1168 >                m2v.y() = nCells_.y() - 1;
1169 >              }
1170 >              
1171 >              if (m2v.z() >= nCells_.z()) {
1172 >                m2v.z() = 0;          
1173 >              } else if (m2v.z() < 0) {
1174 >                m2v.z() = nCells_.z() - 1;
1175 >              }
1176  
1177 <                if (m2 != m1 || cgColToGlobal[(*j2)] < cgRowToGlobal[(*j1)]) {
1177 >              int m2 = Vlinear (m2v, nCells_);
1178 >              
1179 > #ifdef IS_MPI
1180 >              for (vector<int>::iterator j1 = cellListRow_[m1].begin();
1181 >                   j1 != cellListRow_[m1].end(); ++j1) {
1182 >                for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1183 >                     j2 != cellListCol_[m2].end(); ++j2) {
1184 >                  
1185 >                  // In parallel, we need to visit *all* pairs of row
1186 >                  // & column indices and will divide labor in the
1187 >                  // force evaluation later.
1188                    dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)];
1189                    snap_->wrapVector(dr);
1190 <                  if (dr.lengthSquare() < rl2) {
1190 >                  cuts = getGroupCutoffs( (*j1), (*j2) );
1191 >                  if (dr.lengthSquare() < cuts.third) {
1192                      neighborList.push_back(make_pair((*j1), (*j2)));
1193 <                  }
1193 >                  }                  
1194                  }
1195                }
636            }
1196   #else
1197 <            for (vector<int>::iterator j1 = cellList_[m1].begin();
1198 <                 j1 != cellList_[m1].end(); ++j1) {
1199 <              for (vector<int>::iterator j2 = cellList_[m2].begin();
1200 <                   j2 != cellList_[m2].end(); ++j2) {
1201 <                              
1202 <                // Always do this if we're in different cells or if
1203 <                // we're in the same cell and the global index of the
1204 <                // j2 cutoff group is less than the j1 cutoff group
1197 >              for (vector<int>::iterator j1 = cellList_[m1].begin();
1198 >                   j1 != cellList_[m1].end(); ++j1) {
1199 >                for (vector<int>::iterator j2 = cellList_[m2].begin();
1200 >                     j2 != cellList_[m2].end(); ++j2) {
1201 >    
1202 >                  // Always do this if we're in different cells or if
1203 >                  // we're in the same cell and the global index of
1204 >                  // the j2 cutoff group is greater than or equal to
1205 >                  // the j1 cutoff group.  Note that Rappaport's code
1206 >                  // has a "less than" conditional here, but that
1207 >                  // deals with atom-by-atom computation.  OpenMD
1208 >                  // allows atoms within a single cutoff group to
1209 >                  // interact with each other.
1210  
1211 <                if (m2 != m1 || (*j2) < (*j1)) {
1212 <                  dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
1213 <                  snap_->wrapVector(dr);
1214 <                  if (dr.lengthSquare() < rl2) {
1215 <                    neighborList.push_back(make_pair((*j1), (*j2)));
1211 >
1212 >
1213 >                  if (m2 != m1 || (*j2) >= (*j1) ) {
1214 >
1215 >                    dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
1216 >                    snap_->wrapVector(dr);
1217 >                    cuts = getGroupCutoffs( (*j1), (*j2) );
1218 >                    if (dr.lengthSquare() < cuts.third) {
1219 >                      neighborList.push_back(make_pair((*j1), (*j2)));
1220 >                    }
1221                    }
1222                  }
1223                }
655            }
1224   #endif
1225 +            }
1226            }
1227          }
1228        }
1229 +    } else {
1230 +      // branch to do all cutoff group pairs
1231 + #ifdef IS_MPI
1232 +      for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1233 +        for (int j2 = 0; j2 < nGroupsInCol_; j2++) {    
1234 +          dr = cgColData.position[j2] - cgRowData.position[j1];
1235 +          snap_->wrapVector(dr);
1236 +          cuts = getGroupCutoffs( j1, j2 );
1237 +          if (dr.lengthSquare() < cuts.third) {
1238 +            neighborList.push_back(make_pair(j1, j2));
1239 +          }
1240 +        }
1241 +      }      
1242 + #else
1243 +      // include all groups here.
1244 +      for (int j1 = 0; j1 < nGroups_; j1++) {
1245 +        // include self group interactions j2 == j1
1246 +        for (int j2 = j1; j2 < nGroups_; j2++) {
1247 +          dr = snap_->cgData.position[j2] - snap_->cgData.position[j1];
1248 +          snap_->wrapVector(dr);
1249 +          cuts = getGroupCutoffs( j1, j2 );
1250 +          if (dr.lengthSquare() < cuts.third) {
1251 +            neighborList.push_back(make_pair(j1, j2));
1252 +          }
1253 +        }    
1254 +      }
1255 + #endif
1256      }
1257 <
1257 >      
1258      // save the local cutoff group positions for the check that is
1259      // done on each loop:
1260      saved_CG_positions_.clear();
1261      for (int i = 0; i < nGroups_; i++)
1262        saved_CG_positions_.push_back(snap_->cgData.position[i]);
1263 <
1263 >    
1264      return neighborList;
1265    }
1266   } //end namespace OpenMD
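
For reference, the cell assignment in buildNeighborList() works on wrapped fractional coordinates: after subtracting roundMe(scaled[j]) the coordinate lies in [-0.5, 0.5), the +0.5 shift moves it into [0, 1), and scaling by the cell counts gives integer cell indices. A minimal sketch of that mapping; the final linearization is a guess at one common convention, and the real ordering is whatever Vlinear() implements:

    // Hypothetical helper mirroring the per-group logic above.
    int cellIndexOf(const Vector3d& scaledWrapped, const Vector3i& nCells) {
      Vector3i cell;
      cell.x() = (int) (nCells.x() * (scaledWrapped.x() + 0.5));
      cell.y() = (int) (nCells.y() * (scaledWrapped.y() + 0.5));
      cell.z() = (int) (nCells.z() * (scaledWrapped.z() + 0.5));
      // one common row-major-style linearization; Vlinear() may differ
      return cell.x() + nCells.x() * (cell.y() + nCells.y() * cell.z());
    }
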

Diff Legend

(no marker, old line number only) Removed lines
+ Added lines
< Changed lines (revision 1569 side)
> Changed lines (revision 1616 side)