root/OpenMD/trunk/src/parallel/ForceMatrixDecomposition.cpp

Comparing:
branches/development/src/parallel/ForceMatrixDecomposition.cpp (file contents), Revision 1588 by gezelter, Sat Jul 9 15:05:59 2011 UTC vs.
trunk/src/parallel/ForceMatrixDecomposition.cpp (file contents), Revision 2057 by gezelter, Tue Mar 3 15:22:26 2015 UTC

# Line 35 | Line 35
35   *                                                                      
36   * [1]  Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).            
37   * [2]  Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).          
38 < * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).          
39 < * [4]  Vardeman & Gezelter, in progress (2009).                        
38 > * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008).          
39 > * [4]  Kuang & Gezelter,  J. Chem. Phys. 133, 164101 (2010).
40 > * [5]  Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
41   */
42   #include "parallel/ForceMatrixDecomposition.hpp"
43   #include "math/SquareMatrix3.hpp"
# Line 47 | Line 48 | namespace OpenMD {
48   using namespace std;
49   namespace OpenMD {
50  
51 +  ForceMatrixDecomposition::ForceMatrixDecomposition(SimInfo* info, InteractionManager* iMan) : ForceDecomposition(info, iMan) {
52 +
53 +    // Row and column scans must visit all surrounding cells
54 +    cellOffsets_.clear();
55 +    cellOffsets_.push_back( Vector3i(-1,-1,-1) );
56 +    cellOffsets_.push_back( Vector3i( 0,-1,-1) );
57 +    cellOffsets_.push_back( Vector3i( 1,-1,-1) );                          
58 +    cellOffsets_.push_back( Vector3i(-1, 0,-1) );
59 +    cellOffsets_.push_back( Vector3i( 0, 0,-1) );
60 +    cellOffsets_.push_back( Vector3i( 1, 0,-1) );
61 +    cellOffsets_.push_back( Vector3i(-1, 1,-1) );
62 +    cellOffsets_.push_back( Vector3i( 0, 1,-1) );      
63 +    cellOffsets_.push_back( Vector3i( 1, 1,-1) );
64 +    cellOffsets_.push_back( Vector3i(-1,-1, 0) );
65 +    cellOffsets_.push_back( Vector3i( 0,-1, 0) );
66 +    cellOffsets_.push_back( Vector3i( 1,-1, 0) );
67 +    cellOffsets_.push_back( Vector3i(-1, 0, 0) );      
68 +    cellOffsets_.push_back( Vector3i( 0, 0, 0) );
69 +    cellOffsets_.push_back( Vector3i( 1, 0, 0) );
70 +    cellOffsets_.push_back( Vector3i(-1, 1, 0) );
71 +    cellOffsets_.push_back( Vector3i( 0, 1, 0) );
72 +    cellOffsets_.push_back( Vector3i( 1, 1, 0) );
73 +    cellOffsets_.push_back( Vector3i(-1,-1, 1) );
74 +    cellOffsets_.push_back( Vector3i( 0,-1, 1) );
75 +    cellOffsets_.push_back( Vector3i( 1,-1, 1) );
76 +    cellOffsets_.push_back( Vector3i(-1, 0, 1) );
77 +    cellOffsets_.push_back( Vector3i( 0, 0, 1) );
78 +    cellOffsets_.push_back( Vector3i( 1, 0, 1) );
79 +    cellOffsets_.push_back( Vector3i(-1, 1, 1) );
80 +    cellOffsets_.push_back( Vector3i( 0, 1, 1) );
81 +    cellOffsets_.push_back( Vector3i( 1, 1, 1) );
82 +  }
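
The 27 offsets above enumerate the full 3x3x3 block of neighboring cells, including the central cell itself. A minimal sketch of generating the same list with a triple loop, assuming only the Vector3i constructor and the member container used above (the ordering matches the explicit list):

    cellOffsets_.clear();
    for (int k = -1; k <= 1; k++)        // z offset (slowest varying)
      for (int j = -1; j <= 1; j++)      // y offset
        for (int i = -1; i <= 1; i++)    // x offset (fastest varying)
          cellOffsets_.push_back( Vector3i(i, j, k) );
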
83 +
84 +
85    /**
86     * distributeInitialData is essentially a copy of the older Fortran
87     * SimulationSetup
88     */
54  
89    void ForceMatrixDecomposition::distributeInitialData() {
90      snap_ = sman_->getCurrentSnapshot();
91      storageLayout_ = sman_->getStorageLayout();
92      ff_ = info_->getForceField();
93      nLocal_ = snap_->getNumberOfAtoms();
94 <    
94 >  
95      nGroups_ = info_->getNLocalCutoffGroups();
96      // gather the information for atomtype IDs (atids):
97      idents = info_->getIdentArray();
98 +    regions = info_->getRegions();
99      AtomLocalToGlobal = info_->getGlobalAtomIndices();
100      cgLocalToGlobal = info_->getGlobalGroupIndices();
101      vector<int> globalGroupMembership = info_->getGlobalGroupMembership();
# Line 71 | Line 106 | namespace OpenMD {
106      PairList* oneTwo = info_->getOneTwoInteractions();
107      PairList* oneThree = info_->getOneThreeInteractions();
108      PairList* oneFour = info_->getOneFourInteractions();
109 <
109 >    
110 >    if (needVelocities_)
111 >      snap_->cgData.setStorageLayout(DataStorage::dslPosition |
112 >                                     DataStorage::dslVelocity);
113 >    else
114 >      snap_->cgData.setStorageLayout(DataStorage::dslPosition);
115 >    
116   #ifdef IS_MPI
117  
118 <    AtomCommIntRow = new Communicator<Row,int>(nLocal_);
119 <    AtomCommRealRow = new Communicator<Row,RealType>(nLocal_);
79 <    AtomCommVectorRow = new Communicator<Row,Vector3d>(nLocal_);
80 <    AtomCommMatrixRow = new Communicator<Row,Mat3x3d>(nLocal_);
81 <    AtomCommPotRow = new Communicator<Row,potVec>(nLocal_);
118 >    MPI_Comm row = rowComm.getComm();
119 >    MPI_Comm col = colComm.getComm();
120  
121 <    AtomCommIntColumn = new Communicator<Column,int>(nLocal_);
122 <    AtomCommRealColumn = new Communicator<Column,RealType>(nLocal_);
123 <    AtomCommVectorColumn = new Communicator<Column,Vector3d>(nLocal_);
124 <    AtomCommMatrixColumn = new Communicator<Column,Mat3x3d>(nLocal_);
125 <    AtomCommPotColumn = new Communicator<Column,potVec>(nLocal_);
121 >    AtomPlanIntRow = new Plan<int>(row, nLocal_);
122 >    AtomPlanRealRow = new Plan<RealType>(row, nLocal_);
123 >    AtomPlanVectorRow = new Plan<Vector3d>(row, nLocal_);
124 >    AtomPlanMatrixRow = new Plan<Mat3x3d>(row, nLocal_);
125 >    AtomPlanPotRow = new Plan<potVec>(row, nLocal_);
126  
127 <    cgCommIntRow = new Communicator<Row,int>(nGroups_);
128 <    cgCommVectorRow = new Communicator<Row,Vector3d>(nGroups_);
129 <    cgCommIntColumn = new Communicator<Column,int>(nGroups_);
130 <    cgCommVectorColumn = new Communicator<Column,Vector3d>(nGroups_);
127 >    AtomPlanIntColumn = new Plan<int>(col, nLocal_);
128 >    AtomPlanRealColumn = new Plan<RealType>(col, nLocal_);
129 >    AtomPlanVectorColumn = new Plan<Vector3d>(col, nLocal_);
130 >    AtomPlanMatrixColumn = new Plan<Mat3x3d>(col, nLocal_);
131 >    AtomPlanPotColumn = new Plan<potVec>(col, nLocal_);
132  
133 <    nAtomsInRow_ = AtomCommIntRow->getSize();
134 <    nAtomsInCol_ = AtomCommIntColumn->getSize();
135 <    nGroupsInRow_ = cgCommIntRow->getSize();
136 <    nGroupsInCol_ = cgCommIntColumn->getSize();
133 >    cgPlanIntRow = new Plan<int>(row, nGroups_);
134 >    cgPlanVectorRow = new Plan<Vector3d>(row, nGroups_);
135 >    cgPlanIntColumn = new Plan<int>(col, nGroups_);
136 >    cgPlanVectorColumn = new Plan<Vector3d>(col, nGroups_);
137  
138 +    nAtomsInRow_ = AtomPlanIntRow->getSize();
139 +    nAtomsInCol_ = AtomPlanIntColumn->getSize();
140 +    nGroupsInRow_ = cgPlanIntRow->getSize();
141 +    nGroupsInCol_ = cgPlanIntColumn->getSize();
142 +
143      // Modify the data storage objects with the correct layouts and sizes:
144      atomRowData.resize(nAtomsInRow_);
145      atomRowData.setStorageLayout(storageLayout_);
# Line 104 | Line 148 | namespace OpenMD {
148      cgRowData.resize(nGroupsInRow_);
149      cgRowData.setStorageLayout(DataStorage::dslPosition);
150      cgColData.resize(nGroupsInCol_);
151 <    cgColData.setStorageLayout(DataStorage::dslPosition);
152 <        
151 >    if (needVelocities_)
152 >      // only add velocity storage to the column data when velocities are needed
153 >      cgColData.setStorageLayout(DataStorage::dslPosition |
154 >                                 DataStorage::dslVelocity);
155 >    else    
156 >      cgColData.setStorageLayout(DataStorage::dslPosition);
157 >      
158      identsRow.resize(nAtomsInRow_);
159      identsCol.resize(nAtomsInCol_);
160      
161 <    AtomCommIntRow->gather(idents, identsRow);
162 <    AtomCommIntColumn->gather(idents, identsCol);
161 >    AtomPlanIntRow->gather(idents, identsRow);
162 >    AtomPlanIntColumn->gather(idents, identsCol);
163 >
164 >    regionsRow.resize(nAtomsInRow_);
165 >    regionsCol.resize(nAtomsInCol_);
166      
167 <    vector<int>::iterator it;
168 <    for (it = AtomLocalToGlobal.begin(); it != AtomLocalToGlobal.end(); ++it) {
117 <      cerr << "my AtomLocalToGlobal = " << (*it) << "\n";
118 <    }
119 <    AtomCommIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
120 <    AtomCommIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);
167 >    AtomPlanIntRow->gather(regions, regionsRow);
168 >    AtomPlanIntColumn->gather(regions, regionsCol);
169      
170 <    cgCommIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
171 <    cgCommIntColumn->gather(cgLocalToGlobal, cgColToGlobal);
170 >    // allocate memory for the parallel objects
171 >    atypesRow.resize(nAtomsInRow_);
172 >    atypesCol.resize(nAtomsInCol_);
173  
174 <    AtomCommRealRow->gather(massFactors, massFactorsRow);
175 <    AtomCommRealColumn->gather(massFactors, massFactorsCol);
174 >    for (int i = 0; i < nAtomsInRow_; i++)
175 >      atypesRow[i] = ff_->getAtomType(identsRow[i]);
176 >    for (int i = 0; i < nAtomsInCol_; i++)
177 >      atypesCol[i] = ff_->getAtomType(identsCol[i]);        
178  
179 +    pot_row.resize(nAtomsInRow_);
180 +    pot_col.resize(nAtomsInCol_);
181 +
182 +    expot_row.resize(nAtomsInRow_);
183 +    expot_col.resize(nAtomsInCol_);
184 +
185 +    AtomRowToGlobal.resize(nAtomsInRow_);
186 +    AtomColToGlobal.resize(nAtomsInCol_);
187 +    AtomPlanIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
188 +    AtomPlanIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);
189 +
190 +    cgRowToGlobal.resize(nGroupsInRow_);
191 +    cgColToGlobal.resize(nGroupsInCol_);
192 +    cgPlanIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
193 +    cgPlanIntColumn->gather(cgLocalToGlobal, cgColToGlobal);
194 +
195 +    massFactorsRow.resize(nAtomsInRow_);
196 +    massFactorsCol.resize(nAtomsInCol_);
197 +    AtomPlanRealRow->gather(massFactors, massFactorsRow);
198 +    AtomPlanRealColumn->gather(massFactors, massFactorsCol);
199 +
200      groupListRow_.clear();
201      groupListRow_.resize(nGroupsInRow_);
202      for (int i = 0; i < nGroupsInRow_; i++) {
# Line 179 | Line 251 | namespace OpenMD {
251        }      
252      }
253  
254 < #endif
183 <
184 <    groupList_.clear();
185 <    groupList_.resize(nGroups_);
186 <    for (int i = 0; i < nGroups_; i++) {
187 <      int gid = cgLocalToGlobal[i];
188 <      for (int j = 0; j < nLocal_; j++) {
189 <        int aid = AtomLocalToGlobal[j];
190 <        if (globalGroupMembership[aid] == gid) {
191 <          groupList_[i].push_back(j);
192 <        }
193 <      }      
194 <    }
195 <
254 > #else
255      excludesForAtom.clear();
256      excludesForAtom.resize(nLocal_);
257      toposForAtom.clear();
# Line 225 | Line 284 | namespace OpenMD {
284          }
285        }      
286      }
287 <    
229 <    createGtypeCutoffMap();
287 > #endif
288  
289 +    // allocate memory for the parallel objects
290 +    atypesLocal.resize(nLocal_);
291 +
292 +    for (int i = 0; i < nLocal_; i++)
293 +      atypesLocal[i] = ff_->getAtomType(idents[i]);
294 +
295 +    groupList_.clear();
296 +    groupList_.resize(nGroups_);
297 +    for (int i = 0; i < nGroups_; i++) {
298 +      int gid = cgLocalToGlobal[i];
299 +      for (int j = 0; j < nLocal_; j++) {
300 +        int aid = AtomLocalToGlobal[j];
301 +        if (globalGroupMembership[aid] == gid) {
302 +          groupList_[i].push_back(j);
303 +        }
304 +      }      
305 +    }    
306    }
232  
233  void ForceMatrixDecomposition::createGtypeCutoffMap() {
307      
308 <    RealType tol = 1e-6;
309 <    RealType rc;
237 <    int atid;
238 <    set<AtomType*> atypes = info_->getSimulatedAtomTypes();
239 <    map<int, RealType> atypeCutoff;
240 <      
241 <    for (set<AtomType*>::iterator at = atypes.begin();
242 <         at != atypes.end(); ++at){
243 <      atid = (*at)->getIdent();
244 <      if (userChoseCutoff_)
245 <        atypeCutoff[atid] = userCutoff_;
246 <      else
247 <        atypeCutoff[atid] = interactionMan_->getSuggestedCutoffRadius(*at);
248 <    }
249 <
250 <    vector<RealType> gTypeCutoffs;
251 <    // first we do a single loop over the cutoff groups to find the
252 <    // largest cutoff for any atypes present in this group.
253 < #ifdef IS_MPI
254 <    vector<RealType> groupCutoffRow(nGroupsInRow_, 0.0);
255 <    groupRowToGtype.resize(nGroupsInRow_);
256 <    for (int cg1 = 0; cg1 < nGroupsInRow_; cg1++) {
257 <      vector<int> atomListRow = getAtomsInGroupRow(cg1);
258 <      for (vector<int>::iterator ia = atomListRow.begin();
259 <           ia != atomListRow.end(); ++ia) {            
260 <        int atom1 = (*ia);
261 <        atid = identsRow[atom1];
262 <        if (atypeCutoff[atid] > groupCutoffRow[cg1]) {
263 <          groupCutoffRow[cg1] = atypeCutoff[atid];
264 <        }
265 <      }
266 <
267 <      bool gTypeFound = false;
268 <      for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
269 <        if (abs(groupCutoffRow[cg1] - gTypeCutoffs[gt]) < tol) {
270 <          groupRowToGtype[cg1] = gt;
271 <          gTypeFound = true;
272 <        }
273 <      }
274 <      if (!gTypeFound) {
275 <        gTypeCutoffs.push_back( groupCutoffRow[cg1] );
276 <        groupRowToGtype[cg1] = gTypeCutoffs.size() - 1;
277 <      }
278 <      
279 <    }
280 <    vector<RealType> groupCutoffCol(nGroupsInCol_, 0.0);
281 <    groupColToGtype.resize(nGroupsInCol_);
282 <    for (int cg2 = 0; cg2 < nGroupsInCol_; cg2++) {
283 <      vector<int> atomListCol = getAtomsInGroupColumn(cg2);
284 <      for (vector<int>::iterator jb = atomListCol.begin();
285 <           jb != atomListCol.end(); ++jb) {            
286 <        int atom2 = (*jb);
287 <        atid = identsCol[atom2];
288 <        if (atypeCutoff[atid] > groupCutoffCol[cg2]) {
289 <          groupCutoffCol[cg2] = atypeCutoff[atid];
290 <        }
291 <      }
292 <      bool gTypeFound = false;
293 <      for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
294 <        if (abs(groupCutoffCol[cg2] - gTypeCutoffs[gt]) < tol) {
295 <          groupColToGtype[cg2] = gt;
296 <          gTypeFound = true;
297 <        }
298 <      }
299 <      if (!gTypeFound) {
300 <        gTypeCutoffs.push_back( groupCutoffCol[cg2] );
301 <        groupColToGtype[cg2] = gTypeCutoffs.size() - 1;
302 <      }
303 <    }
304 < #else
305 <
306 <    vector<RealType> groupCutoff(nGroups_, 0.0);
307 <    groupToGtype.resize(nGroups_);
308 <    for (int cg1 = 0; cg1 < nGroups_; cg1++) {
309 <
310 <      groupCutoff[cg1] = 0.0;
311 <      vector<int> atomList = getAtomsInGroupRow(cg1);
312 <
313 <      for (vector<int>::iterator ia = atomList.begin();
314 <           ia != atomList.end(); ++ia) {            
315 <        int atom1 = (*ia);
316 <        atid = idents[atom1];
317 <        if (atypeCutoff[atid] > groupCutoff[cg1]) {
318 <          groupCutoff[cg1] = atypeCutoff[atid];
319 <        }
320 <      }
321 <
322 <      bool gTypeFound = false;
323 <      for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
324 <        if (abs(groupCutoff[cg1] - gTypeCutoffs[gt]) < tol) {
325 <          groupToGtype[cg1] = gt;
326 <          gTypeFound = true;
327 <        }
328 <      }
329 <      if (!gTypeFound) {
330 <        gTypeCutoffs.push_back( groupCutoff[cg1] );
331 <        groupToGtype[cg1] = gTypeCutoffs.size() - 1;
332 <      }      
333 <    }
334 < #endif
335 <
336 <    // Now we find the maximum group cutoff value present in the simulation
337 <
338 <    RealType groupMax = *max_element(gTypeCutoffs.begin(), gTypeCutoffs.end());
339 <
340 < #ifdef IS_MPI
341 <    MPI::COMM_WORLD.Allreduce(&groupMax, &groupMax, 1, MPI::REALTYPE, MPI::MAX);
342 < #endif
343 <    
344 <    RealType tradRcut = groupMax;
345 <
346 <    for (int i = 0; i < gTypeCutoffs.size();  i++) {
347 <      for (int j = 0; j < gTypeCutoffs.size();  j++) {      
348 <        RealType thisRcut;
349 <        switch(cutoffPolicy_) {
350 <        case TRADITIONAL:
351 <          thisRcut = tradRcut;
352 <          break;
353 <        case MIX:
354 <          thisRcut = 0.5 * (gTypeCutoffs[i] + gTypeCutoffs[j]);
355 <          break;
356 <        case MAX:
357 <          thisRcut = max(gTypeCutoffs[i], gTypeCutoffs[j]);
358 <          break;
359 <        default:
360 <          sprintf(painCave.errMsg,
361 <                  "ForceMatrixDecomposition::createGtypeCutoffMap "
362 <                  "hit an unknown cutoff policy!\n");
363 <          painCave.severity = OPENMD_ERROR;
364 <          painCave.isFatal = 1;
365 <          simError();
366 <          break;
367 <        }
368 <
369 <        pair<int,int> key = make_pair(i,j);
370 <        gTypeCutoffMap[key].first = thisRcut;
371 <
372 <        if (thisRcut > largestRcut_) largestRcut_ = thisRcut;
373 <
374 <        gTypeCutoffMap[key].second = thisRcut*thisRcut;
375 <        
376 <        gTypeCutoffMap[key].third = pow(thisRcut + skinThickness_, 2);
377 <
378 <        // sanity check
379 <        
380 <        if (userChoseCutoff_) {
381 <          if (abs(gTypeCutoffMap[key].first - userCutoff_) > 0.0001) {
382 <            sprintf(painCave.errMsg,
383 <                    "ForceMatrixDecomposition::createGtypeCutoffMap "
384 <                    "user-specified rCut (%lf) does not match computed group Cutoff\n", userCutoff_);
385 <            painCave.severity = OPENMD_ERROR;
386 <            painCave.isFatal = 1;
387 <            simError();            
388 <          }
389 <        }
390 <      }
391 <    }
392 <  }
393 <
394 <
395 <  groupCutoffs ForceMatrixDecomposition::getGroupCutoffs(int cg1, int cg2) {
396 <    int i, j;  
397 < #ifdef IS_MPI
398 <    i = groupRowToGtype[cg1];
399 <    j = groupColToGtype[cg2];
400 < #else
401 <    i = groupToGtype[cg1];
402 <    j = groupToGtype[cg2];
403 < #endif    
404 <    return gTypeCutoffMap[make_pair(i,j)];
405 <  }
406 <
407 <  int ForceMatrixDecomposition::getTopologicalDistance(int atom1, int atom2) {
408 <    for (int j = 0; j < toposForAtom[atom1].size(); j++) {
308 >  int ForceMatrixDecomposition::getTopologicalDistance(int atom1, int atom2) {
309 >    for (unsigned int j = 0; j < toposForAtom[atom1].size(); j++) {
310        if (toposForAtom[atom1][j] == atom2)
311          return topoDist[atom1][j];
312 <    }
312 >    }                                          
313      return 0;
314    }
315  
316    void ForceMatrixDecomposition::zeroWorkArrays() {
317      pairwisePot = 0.0;
318      embeddingPot = 0.0;
319 +    excludedPot = 0.0;
320 +    excludedSelfPot = 0.0;
321  
322   #ifdef IS_MPI
323      if (storageLayout_ & DataStorage::dslForce) {
# Line 433 | Line 336 | namespace OpenMD {
336      fill(pot_col.begin(), pot_col.end(),
337           Vector<RealType, N_INTERACTION_FAMILIES> (0.0));  
338  
339 +    fill(expot_row.begin(), expot_row.end(),
340 +         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
341 +
342 +    fill(expot_col.begin(), expot_col.end(),
343 +         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));  
344 +
345      if (storageLayout_ & DataStorage::dslParticlePot) {    
346 <      fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(), 0.0);
347 <      fill(atomColData.particlePot.begin(), atomColData.particlePot.end(), 0.0);
346 >      fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(),
347 >           0.0);
348 >      fill(atomColData.particlePot.begin(), atomColData.particlePot.end(),
349 >           0.0);
350      }
351  
352      if (storageLayout_ & DataStorage::dslDensity) {      
# Line 444 | Line 355 | namespace OpenMD {
355      }
356  
357      if (storageLayout_ & DataStorage::dslFunctional) {  
358 <      fill(atomRowData.functional.begin(), atomRowData.functional.end(), 0.0);
359 <      fill(atomColData.functional.begin(), atomColData.functional.end(), 0.0);
358 >      fill(atomRowData.functional.begin(), atomRowData.functional.end(),
359 >           0.0);
360 >      fill(atomColData.functional.begin(), atomColData.functional.end(),
361 >           0.0);
362      }
363  
364      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {      
# Line 462 | Line 375 | namespace OpenMD {
375             atomColData.skippedCharge.end(), 0.0);
376      }
377  
378 < #else
379 <    
378 >    if (storageLayout_ & DataStorage::dslFlucQForce) {      
379 >      fill(atomRowData.flucQFrc.begin(),
380 >           atomRowData.flucQFrc.end(), 0.0);
381 >      fill(atomColData.flucQFrc.begin(),
382 >           atomColData.flucQFrc.end(), 0.0);
383 >    }
384 >
385 >    if (storageLayout_ & DataStorage::dslElectricField) {    
386 >      fill(atomRowData.electricField.begin(),
387 >           atomRowData.electricField.end(), V3Zero);
388 >      fill(atomColData.electricField.begin(),
389 >           atomColData.electricField.end(), V3Zero);
390 >    }
391 >
392 >    if (storageLayout_ & DataStorage::dslSitePotential) {    
393 >      fill(atomRowData.sitePotential.begin(),
394 >           atomRowData.sitePotential.end(), 0.0);
395 >      fill(atomColData.sitePotential.begin(),
396 >           atomColData.sitePotential.end(), 0.0);
397 >    }
398 >
399 > #endif
400 >    // even in parallel, we need to zero out the local arrays:
401 >
402      if (storageLayout_ & DataStorage::dslParticlePot) {      
403        fill(snap_->atomData.particlePot.begin(),
404             snap_->atomData.particlePot.end(), 0.0);
# Line 473 | Line 408 | namespace OpenMD {
408        fill(snap_->atomData.density.begin(),
409             snap_->atomData.density.end(), 0.0);
410      }
411 +
412      if (storageLayout_ & DataStorage::dslFunctional) {
413        fill(snap_->atomData.functional.begin(),
414             snap_->atomData.functional.end(), 0.0);
415      }
416 +
417      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {      
418        fill(snap_->atomData.functionalDerivative.begin(),
419             snap_->atomData.functionalDerivative.end(), 0.0);
420      }
421 +
422      if (storageLayout_ & DataStorage::dslSkippedCharge) {      
423        fill(snap_->atomData.skippedCharge.begin(),
424             snap_->atomData.skippedCharge.end(), 0.0);
425      }
426 < #endif
427 <    
426 >
427 >    if (storageLayout_ & DataStorage::dslElectricField) {      
428 >      fill(snap_->atomData.electricField.begin(),
429 >           snap_->atomData.electricField.end(), V3Zero);
430 >    }
431 >    if (storageLayout_ & DataStorage::dslSitePotential) {      
432 >      fill(snap_->atomData.sitePotential.begin(),
433 >           snap_->atomData.sitePotential.end(), 0.0);
434 >    }
435    }
436  
437  
438    void ForceMatrixDecomposition::distributeData()  {
439      snap_ = sman_->getCurrentSnapshot();
440      storageLayout_ = sman_->getStorageLayout();
441 +
442 +    bool needsCG = true;
443 +    if(info_->getNCutoffGroups() != info_->getNAtoms())
444 +      needsCG = false;
445 +  
446   #ifdef IS_MPI
447      
448      // gather up the atomic positions
449 <    AtomCommVectorRow->gather(snap_->atomData.position,
449 >    AtomPlanVectorRow->gather(snap_->atomData.position,
450                                atomRowData.position);
451 <    AtomCommVectorColumn->gather(snap_->atomData.position,
451 >    AtomPlanVectorColumn->gather(snap_->atomData.position,
452                                   atomColData.position);
453      
454      // gather up the cutoff group positions
455 <    cgCommVectorRow->gather(snap_->cgData.position,
456 <                            cgRowData.position);
457 <    cgCommVectorColumn->gather(snap_->cgData.position,
458 <                               cgColData.position);
455 >
456 >    if (needsCG) {
457 >      cgPlanVectorRow->gather(snap_->cgData.position,
458 >                              cgRowData.position);
459 >      
460 >      cgPlanVectorColumn->gather(snap_->cgData.position,
461 >                                 cgColData.position);
462 >    }
463 >
464 >
465 >    if (needVelocities_) {
466 >      // gather up the atomic velocities
467 >      AtomPlanVectorColumn->gather(snap_->atomData.velocity,
468 >                                   atomColData.velocity);
469 >
470 >      if (needsCG) {        
471 >        cgPlanVectorColumn->gather(snap_->cgData.velocity,
472 >                                   cgColData.velocity);
473 >      }
474 >    }
475 >
476      
477      // if needed, gather the atomic rotation matrices
478      if (storageLayout_ & DataStorage::dslAmat) {
479 <      AtomCommMatrixRow->gather(snap_->atomData.aMat,
479 >      AtomPlanMatrixRow->gather(snap_->atomData.aMat,
480                                  atomRowData.aMat);
481 <      AtomCommMatrixColumn->gather(snap_->atomData.aMat,
481 >      AtomPlanMatrixColumn->gather(snap_->atomData.aMat,
482                                     atomColData.aMat);
483      }
484 <    
485 <    // if needed, gather the atomic eletrostatic frames
486 <    if (storageLayout_ & DataStorage::dslElectroFrame) {
487 <      AtomCommMatrixRow->gather(snap_->atomData.electroFrame,
488 <                                atomRowData.electroFrame);
489 <      AtomCommMatrixColumn->gather(snap_->atomData.electroFrame,
490 <                                   atomColData.electroFrame);
484 >
485 >    // if needed, gather the atomic electrostatic information
486 >    if (storageLayout_ & DataStorage::dslDipole) {
487 >      AtomPlanVectorRow->gather(snap_->atomData.dipole,
488 >                                atomRowData.dipole);
489 >      AtomPlanVectorColumn->gather(snap_->atomData.dipole,
490 >                                   atomColData.dipole);
491      }
492 +
493 +    if (storageLayout_ & DataStorage::dslQuadrupole) {
494 +      AtomPlanMatrixRow->gather(snap_->atomData.quadrupole,
495 +                                atomRowData.quadrupole);
496 +      AtomPlanMatrixColumn->gather(snap_->atomData.quadrupole,
497 +                                   atomColData.quadrupole);
498 +    }
499 +        
500 +    // if needed, gather the atomic fluctuating charge values
501 +    if (storageLayout_ & DataStorage::dslFlucQPosition) {
502 +      AtomPlanRealRow->gather(snap_->atomData.flucQPos,
503 +                              atomRowData.flucQPos);
504 +      AtomPlanRealColumn->gather(snap_->atomData.flucQPos,
505 +                                 atomColData.flucQPos);
506 +    }
507 +
508   #endif      
509    }
510    
# Line 535 | Line 518 | namespace OpenMD {
518      
519      if (storageLayout_ & DataStorage::dslDensity) {
520        
521 <      AtomCommRealRow->scatter(atomRowData.density,
521 >      AtomPlanRealRow->scatter(atomRowData.density,
522                                 snap_->atomData.density);
523        
524        int n = snap_->atomData.density.size();
525        vector<RealType> rho_tmp(n, 0.0);
526 <      AtomCommRealColumn->scatter(atomColData.density, rho_tmp);
526 >      AtomPlanRealColumn->scatter(atomColData.density, rho_tmp);
527        for (int i = 0; i < n; i++)
528          snap_->atomData.density[i] += rho_tmp[i];
529      }
530 +
531 +    // this isn't necessary if we don't have polarizable atoms, but
532 +    // we'll leave it here for now.
533 +    if (storageLayout_ & DataStorage::dslElectricField) {
534 +      
535 +      AtomPlanVectorRow->scatter(atomRowData.electricField,
536 +                                 snap_->atomData.electricField);
537 +      
538 +      int n = snap_->atomData.electricField.size();
539 +      vector<Vector3d> field_tmp(n, V3Zero);
540 +      AtomPlanVectorColumn->scatter(atomColData.electricField,
541 +                                    field_tmp);
542 +      for (int i = 0; i < n; i++)
543 +        snap_->atomData.electricField[i] += field_tmp[i];
544 +    }
545   #endif
546    }
547  
# Line 556 | Line 554 | namespace OpenMD {
554      storageLayout_ = sman_->getStorageLayout();
555   #ifdef IS_MPI
556      if (storageLayout_ & DataStorage::dslFunctional) {
557 <      AtomCommRealRow->gather(snap_->atomData.functional,
557 >      AtomPlanRealRow->gather(snap_->atomData.functional,
558                                atomRowData.functional);
559 <      AtomCommRealColumn->gather(snap_->atomData.functional,
559 >      AtomPlanRealColumn->gather(snap_->atomData.functional,
560                                   atomColData.functional);
561      }
562      
563      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
564 <      AtomCommRealRow->gather(snap_->atomData.functionalDerivative,
564 >      AtomPlanRealRow->gather(snap_->atomData.functionalDerivative,
565                                atomRowData.functionalDerivative);
566 <      AtomCommRealColumn->gather(snap_->atomData.functionalDerivative,
566 >      AtomPlanRealColumn->gather(snap_->atomData.functionalDerivative,
567                                   atomColData.functionalDerivative);
568      }
569   #endif
# Line 579 | Line 577 | namespace OpenMD {
577      int n = snap_->atomData.force.size();
578      vector<Vector3d> frc_tmp(n, V3Zero);
579      
580 <    AtomCommVectorRow->scatter(atomRowData.force, frc_tmp);
580 >    AtomPlanVectorRow->scatter(atomRowData.force, frc_tmp);
581      for (int i = 0; i < n; i++) {
582        snap_->atomData.force[i] += frc_tmp[i];
583        frc_tmp[i] = 0.0;
584      }
585      
586 <    AtomCommVectorColumn->scatter(atomColData.force, frc_tmp);
587 <    for (int i = 0; i < n; i++)
586 >    AtomPlanVectorColumn->scatter(atomColData.force, frc_tmp);
587 >    for (int i = 0; i < n; i++) {
588        snap_->atomData.force[i] += frc_tmp[i];
589 <    
590 <    
589 >    }
590 >        
591      if (storageLayout_ & DataStorage::dslTorque) {
592  
593        int nt = snap_->atomData.torque.size();
594        vector<Vector3d> trq_tmp(nt, V3Zero);
595  
596 <      AtomCommVectorRow->scatter(atomRowData.torque, trq_tmp);
596 >      AtomPlanVectorRow->scatter(atomRowData.torque, trq_tmp);
597        for (int i = 0; i < nt; i++) {
598          snap_->atomData.torque[i] += trq_tmp[i];
599          trq_tmp[i] = 0.0;
600        }
601        
602 <      AtomCommVectorColumn->scatter(atomColData.torque, trq_tmp);
602 >      AtomPlanVectorColumn->scatter(atomColData.torque, trq_tmp);
603        for (int i = 0; i < nt; i++)
604          snap_->atomData.torque[i] += trq_tmp[i];
605      }
# Line 611 | Line 609 | namespace OpenMD {
609        int ns = snap_->atomData.skippedCharge.size();
610        vector<RealType> skch_tmp(ns, 0.0);
611  
612 <      AtomCommRealRow->scatter(atomRowData.skippedCharge, skch_tmp);
612 >      AtomPlanRealRow->scatter(atomRowData.skippedCharge, skch_tmp);
613        for (int i = 0; i < ns; i++) {
614 <        snap_->atomData.skippedCharge[i] = skch_tmp[i];
614 >        snap_->atomData.skippedCharge[i] += skch_tmp[i];
615          skch_tmp[i] = 0.0;
616        }
617        
618 <      AtomCommRealColumn->scatter(atomColData.skippedCharge, skch_tmp);
619 <      for (int i = 0; i < ns; i++)
618 >      AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp);
619 >      for (int i = 0; i < ns; i++)
620          snap_->atomData.skippedCharge[i] += skch_tmp[i];
621 +            
622      }
623      
624 +    if (storageLayout_ & DataStorage::dslFlucQForce) {
625 +
626 +      int nq = snap_->atomData.flucQFrc.size();
627 +      vector<RealType> fqfrc_tmp(nq, 0.0);
628 +
629 +      AtomPlanRealRow->scatter(atomRowData.flucQFrc, fqfrc_tmp);
630 +      for (int i = 0; i < nq; i++) {
631 +        snap_->atomData.flucQFrc[i] += fqfrc_tmp[i];
632 +        fqfrc_tmp[i] = 0.0;
633 +      }
634 +      
635 +      AtomPlanRealColumn->scatter(atomColData.flucQFrc, fqfrc_tmp);
636 +      for (int i = 0; i < nq; i++)
637 +        snap_->atomData.flucQFrc[i] += fqfrc_tmp[i];
638 +            
639 +    }
640 +
641 +    if (storageLayout_ & DataStorage::dslElectricField) {
642 +
643 +      int nef = snap_->atomData.electricField.size();
644 +      vector<Vector3d> efield_tmp(nef, V3Zero);
645 +
646 +      AtomPlanVectorRow->scatter(atomRowData.electricField, efield_tmp);
647 +      for (int i = 0; i < nef; i++) {
648 +        snap_->atomData.electricField[i] += efield_tmp[i];
649 +        efield_tmp[i] = 0.0;
650 +      }
651 +      
652 +      AtomPlanVectorColumn->scatter(atomColData.electricField, efield_tmp);
653 +      for (int i = 0; i < nef; i++)
654 +        snap_->atomData.electricField[i] += efield_tmp[i];
655 +    }
656 +
657 +    if (storageLayout_ & DataStorage::dslSitePotential) {
658 +
659 +      int nsp = snap_->atomData.sitePotential.size();
660 +      vector<RealType> sp_tmp(nsp, 0.0);
661 +
662 +      AtomPlanRealRow->scatter(atomRowData.sitePotential, sp_tmp);
663 +      for (int i = 0; i < nsp; i++) {
664 +        snap_->atomData.sitePotential[i] += sp_tmp[i];
665 +        sp_tmp[i] = 0.0;
666 +      }
667 +      
668 +      AtomPlanRealColumn->scatter(atomColData.sitePotential, sp_tmp);
669 +      for (int i = 0; i < nsp; i++)
670 +        snap_->atomData.sitePotential[i] += sp_tmp[i];
671 +    }
672 +
673      nLocal_ = snap_->getNumberOfAtoms();
674  
675      vector<potVec> pot_temp(nLocal_,
676                              Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
677 +    vector<potVec> expot_temp(nLocal_,
678 +                              Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
679  
680      // scatter/gather pot_row into the members of my column
681            
682 <    AtomCommPotRow->scatter(pot_row, pot_temp);
682 >    AtomPlanPotRow->scatter(pot_row, pot_temp);
683 >    AtomPlanPotRow->scatter(expot_row, expot_temp);
684  
685 <    for (int ii = 0;  ii < pot_temp.size(); ii++ )
685 >    for (int ii = 0;  ii < pot_temp.size(); ii++ )
686        pairwisePot += pot_temp[ii];
687 <    
687 >
688 >    for (int ii = 0;  ii < expot_temp.size(); ii++ )
689 >      excludedPot += expot_temp[ii];
690 >        
691 >    if (storageLayout_ & DataStorage::dslParticlePot) {
692 >      // This is the pairwise contribution to the particle pot.  The
693 >      // embedding contribution is added in each of the low level
694 >      // non-bonded routines.  In single processor, this is done in
695 >      // unpackInteractionData, not in collectData.
696 >      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
697 >        for (int i = 0; i < nLocal_; i++) {
698 >          // factor of two is because the total potential terms are divided
699 >          // by 2 in parallel due to row/column scatter
700 >          snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii);
701 >        }
702 >      }
703 >    }
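
The factor of two above mirrors the split performed in unpackInteractionData, where each pair potential is divided evenly between the row copy of one atom and the column copy of the other. A short illustrative trace (values shown only to make the bookkeeping concrete, not taken from the source):

    // pair (i, j) with switched pair potential V, parallel path:
    //   unpackInteractionData:    pot_row[i] += 0.5 * V;    pot_col[j] += 0.5 * V;
    //   collectData row scatter:  pot_temp[i] == 0.5 * V
    //   particlePot[i] += 2.0 * pot_temp[i]   // recovers V, matching the serial path
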
704 >
705      fill(pot_temp.begin(), pot_temp.end(),
706           Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
707 +    fill(expot_temp.begin(), expot_temp.end(),
708 +         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
709        
710 <    AtomCommPotColumn->scatter(pot_col, pot_temp);    
710 >    AtomPlanPotColumn->scatter(pot_col, pot_temp);    
711 >    AtomPlanPotColumn->scatter(expot_col, expot_temp);    
712      
713      for (int ii = 0;  ii < pot_temp.size(); ii++ )
714        pairwisePot += pot_temp[ii];    
715 +
716 +    for (int ii = 0;  ii < expot_temp.size(); ii++ )
717 +      excludedPot += expot_temp[ii];    
718 +
719 +    if (storageLayout_ & DataStorage::dslParticlePot) {
720 +      // This is the pairwise contribution to the particle pot.  The
721 +      // embedding contribution is added in each of the low level
722 +      // non-bonded routines.  In single processor, this is done in
723 +      // unpackInteractionData, not in collectData.
724 +      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
725 +        for (int i = 0; i < nLocal_; i++) {
726 +          // factor of two is because the total potential terms are divided
727 +          // by 2 in parallel due to row/column scatter
728 +          snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii);
729 +        }
730 +      }
731 +    }
732 +    
733 +    if (storageLayout_ & DataStorage::dslParticlePot) {
734 +      int npp = snap_->atomData.particlePot.size();
735 +      vector<RealType> ppot_temp(npp, 0.0);
736 +
737 +      // This is the direct or embedding contribution to the particle
738 +      // pot.
739 +      
740 +      AtomPlanRealRow->scatter(atomRowData.particlePot, ppot_temp);
741 +      for (int i = 0; i < npp; i++) {
742 +        snap_->atomData.particlePot[i] += ppot_temp[i];
743 +      }
744 +
745 +      fill(ppot_temp.begin(), ppot_temp.end(), 0.0);
746 +      
747 +      AtomPlanRealColumn->scatter(atomColData.particlePot, ppot_temp);
748 +      for (int i = 0; i < npp; i++) {
749 +        snap_->atomData.particlePot[i] += ppot_temp[i];
750 +      }
751 +    }
752 +
753 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
754 +      RealType ploc1 = pairwisePot[ii];
755 +      RealType ploc2 = 0.0;
756 +      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
757 +      pairwisePot[ii] = ploc2;
758 +    }
759 +
760 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
761 +      RealType ploc1 = excludedPot[ii];
762 +      RealType ploc2 = 0.0;
763 +      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
764 +      excludedPot[ii] = ploc2;
765 +    }
766 +
767 +    // Here be dragons.
768 +    MPI_Comm col = colComm.getComm();
769 +
770 +    MPI_Allreduce(MPI_IN_PLACE,
771 +                  &snap_->frameData.conductiveHeatFlux[0], 3,
772 +                  MPI_REALTYPE, MPI_SUM, col);
773 +
774 +
775   #endif
776  
777    }
778  
779 <  int ForceMatrixDecomposition::getNAtomsInRow() {  
779 >  /**
780 >   * Collects information obtained during the post-pair (and embedding
781 >   * functional) loops onto local data structures.
782 >   */
783 >  void ForceMatrixDecomposition::collectSelfData() {
784 >    snap_ = sman_->getCurrentSnapshot();
785 >    storageLayout_ = sman_->getStorageLayout();
786 >
787   #ifdef IS_MPI
788 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
789 +      RealType ploc1 = embeddingPot[ii];
790 +      RealType ploc2 = 0.0;
791 +      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
792 +      embeddingPot[ii] = ploc2;
793 +    }    
794 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
795 +      RealType ploc1 = excludedSelfPot[ii];
796 +      RealType ploc2 = 0.0;
797 +      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
798 +      excludedSelfPot[ii] = ploc2;
799 +    }    
800 + #endif
801 +    
802 +  }
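
The reductions above issue one MPI_Allreduce per interaction family. A minimal sketch of the same reduction batched into one call per potential vector, assuming MPI_REALTYPE (already used above) matches RealType and that staging through a temporary std::vector is acceptable:

    std::vector<RealType> buf(N_INTERACTION_FAMILIES);
    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++)
      buf[ii] = embeddingPot[ii];

    // one collective call instead of N_INTERACTION_FAMILIES separate ones
    MPI_Allreduce(MPI_IN_PLACE, &buf[0], N_INTERACTION_FAMILIES,
                  MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);

    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++)
      embeddingPot[ii] = buf[ii];
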
803 +
804 +
805 +
806 +  int& ForceMatrixDecomposition::getNAtomsInRow() {  
807 + #ifdef IS_MPI
808      return nAtomsInRow_;
809   #else
810      return nLocal_;
# Line 656 | Line 814 | namespace OpenMD {
814    /**
815     * returns the list of atoms belonging to this group.  
816     */
817 <  vector<int> ForceMatrixDecomposition::getAtomsInGroupRow(int cg1){
817 >  vector<int>& ForceMatrixDecomposition::getAtomsInGroupRow(int cg1){
818   #ifdef IS_MPI
819      return groupListRow_[cg1];
820   #else
# Line 664 | Line 822 | namespace OpenMD {
822   #endif
823    }
824  
825 <  vector<int> ForceMatrixDecomposition::getAtomsInGroupColumn(int cg2){
825 >  vector<int>& ForceMatrixDecomposition::getAtomsInGroupColumn(int cg2){
826   #ifdef IS_MPI
827      return groupListCol_[cg2];
828   #else
# Line 681 | Line 839 | namespace OpenMD {
839      d = snap_->cgData.position[cg2] - snap_->cgData.position[cg1];
840   #endif
841      
842 <    snap_->wrapVector(d);
842 >    if (usePeriodicBoundaryConditions_) {
843 >      snap_->wrapVector(d);
844 >    }
845      return d;    
846    }
847  
848 +  Vector3d& ForceMatrixDecomposition::getGroupVelocityColumn(int cg2){
849 + #ifdef IS_MPI
850 +    return cgColData.velocity[cg2];
851 + #else
852 +    return snap_->cgData.velocity[cg2];
853 + #endif
854 +  }
855  
856 +  Vector3d& ForceMatrixDecomposition::getAtomVelocityColumn(int atom2){
857 + #ifdef IS_MPI
858 +    return atomColData.velocity[atom2];
859 + #else
860 +    return snap_->atomData.velocity[atom2];
861 + #endif
862 +  }
863 +
864 +
865    Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){
866  
867      Vector3d d;
# Line 695 | Line 871 | namespace OpenMD {
871   #else
872      d = snap_->cgData.position[cg1] - snap_->atomData.position[atom1];
873   #endif
874 <
875 <    snap_->wrapVector(d);
874 >    if (usePeriodicBoundaryConditions_) {
875 >      snap_->wrapVector(d);
876 >    }
877      return d;    
878    }
879    
# Line 708 | Line 885 | namespace OpenMD {
885   #else
886      d = snap_->cgData.position[cg2] - snap_->atomData.position[atom2];
887   #endif
888 <    
889 <    snap_->wrapVector(d);
888 >    if (usePeriodicBoundaryConditions_) {
889 >      snap_->wrapVector(d);
890 >    }
891      return d;    
892    }
893  
894 <  RealType ForceMatrixDecomposition::getMassFactorRow(int atom1) {
894 >  RealType& ForceMatrixDecomposition::getMassFactorRow(int atom1) {
895   #ifdef IS_MPI
896      return massFactorsRow[atom1];
897   #else
# Line 721 | Line 899 | namespace OpenMD {
899   #endif
900    }
901  
902 <  RealType ForceMatrixDecomposition::getMassFactorColumn(int atom2) {
902 >  RealType& ForceMatrixDecomposition::getMassFactorColumn(int atom2) {
903   #ifdef IS_MPI
904      return massFactorsCol[atom2];
905   #else
# Line 738 | Line 916 | namespace OpenMD {
916   #else
917      d = snap_->atomData.position[atom2] - snap_->atomData.position[atom1];
918   #endif
919 <
920 <    snap_->wrapVector(d);
919 >    if (usePeriodicBoundaryConditions_) {
920 >      snap_->wrapVector(d);
921 >    }
922      return d;    
923    }
924  
925 <  vector<int> ForceMatrixDecomposition::getExcludesForAtom(int atom1) {
925 >  vector<int>& ForceMatrixDecomposition::getExcludesForAtom(int atom1) {
926      return excludesForAtom[atom1];
927    }
928  
# Line 751 | Line 930 | namespace OpenMD {
930     * We need to exclude some overcounted interactions that result from
931     * the parallel decomposition.
932     */
933 <  bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) {
933 >  bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2, int cg1, int cg2) {
934      int unique_id_1, unique_id_2;
935 <
935 >        
936   #ifdef IS_MPI
937      // in MPI, we have to look up the unique IDs for each atom
938      unique_id_1 = AtomRowToGlobal[atom1];
939      unique_id_2 = AtomColToGlobal[atom2];
940 +    // group1 = cgRowToGlobal[cg1];
941 +    // group2 = cgColToGlobal[cg2];
942 + #else
943 +    unique_id_1 = AtomLocalToGlobal[atom1];
944 +    unique_id_2 = AtomLocalToGlobal[atom2];
945 +    int group1 = cgLocalToGlobal[cg1];
946 +    int group2 = cgLocalToGlobal[cg2];
947 + #endif  
948  
762    // this situation should only arise in MPI simulations
949      if (unique_id_1 == unique_id_2) return true;
950 <    
950 >
951 > #ifdef IS_MPI
952      // this prevents us from doing the pair on multiple processors
953      if (unique_id_1 < unique_id_2) {
954        if ((unique_id_1 + unique_id_2) % 2 == 0) return true;
955      } else {
956 <      if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
956 >      if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
957      }
958 + #endif    
959 +
960 + #ifndef IS_MPI
961 +    if (group1 == group2) {
962 +      if (unique_id_1 < unique_id_2) return true;
963 +    }
964   #endif
965 +    
966      return false;
967    }
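
In the MPI branch above, the same global pair can be visible to two processors: one that holds atom i in its row block and atom j in its column block, and one that holds the transposed assignment. The parity test keeps exactly one of the two orderings. A standalone sketch of that rule, where skipByParity is a hypothetical name used only for illustration:

    // Returns true when this ordering of a pair should be skipped, so that exactly
    // one of the (i, j) / (j, i) orderings is computed across the two processors.
    inline bool skipByParity(int gid1, int gid2) {
      if (gid1 == gid2) return true;                  // never pair an atom with itself
      if (gid1 < gid2)
        return (gid1 + gid2) % 2 == 0;   // even sum: the transposed ordering handles it
      else
        return (gid1 + gid2) % 2 == 1;   // odd sum: the transposed ordering handles it
    }
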
968  
# Line 782 | Line 976 | namespace OpenMD {
976     * field) must still be handled for these pairs.
977     */
978    bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) {
979 <    int unique_id_2;
979 >
980 >    // excludesForAtom was constructed to use row/column indices in the MPI
981 >    // version, and to use local IDs in the non-MPI version:
982      
787 #ifdef IS_MPI
788    // in MPI, we have to look up the unique IDs for the row atom.
789    unique_id_2 = AtomColToGlobal[atom2];
790 #else
791    // in the normal loop, the atom numbers are unique
792    unique_id_2 = atom2;
793 #endif
794    
983      for (vector<int>::iterator i = excludesForAtom[atom1].begin();
984           i != excludesForAtom[atom1].end(); ++i) {
985 <      if ( (*i) == unique_id_2 ) return true;
985 >      if ( (*i) == atom2 ) return true;
986      }
987  
988      return false;
# Line 819 | Line 1007 | namespace OpenMD {
1007  
1008      // filling interaction blocks with pointers
1009    void ForceMatrixDecomposition::fillInteractionData(InteractionData &idat,
1010 <                                                     int atom1, int atom2) {
1010 >                                                     int atom1, int atom2,
1011 >                                                     bool newAtom1) {
1012  
1013      idat.excluded = excludeAtomPair(atom1, atom2);
1014 <  
1014 >
1015 >    if (newAtom1) {
1016 >      
1017   #ifdef IS_MPI
1018 <    
1019 <    idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]),
1020 <                             ff_->getAtomType(identsCol[atom2]) );
1021 <    
1018 >      idat.atid1 = identsRow[atom1];
1019 >      idat.atid2 = identsCol[atom2];
1020 >      
1021 >      if (regionsRow[atom1] >= 0 && regionsCol[atom2] >= 0) {
1022 >        idat.sameRegion = (regionsRow[atom1] == regionsCol[atom2]);
1023 >      } else {
1024 >        idat.sameRegion = false;
1025 >      }
1026 >      
1027 >      if (storageLayout_ & DataStorage::dslAmat) {
1028 >        idat.A1 = &(atomRowData.aMat[atom1]);
1029 >        idat.A2 = &(atomColData.aMat[atom2]);
1030 >      }
1031 >      
1032 >      if (storageLayout_ & DataStorage::dslTorque) {
1033 >        idat.t1 = &(atomRowData.torque[atom1]);
1034 >        idat.t2 = &(atomColData.torque[atom2]);
1035 >      }
1036 >      
1037 >      if (storageLayout_ & DataStorage::dslDipole) {
1038 >        idat.dipole1 = &(atomRowData.dipole[atom1]);
1039 >        idat.dipole2 = &(atomColData.dipole[atom2]);
1040 >      }
1041 >      
1042 >      if (storageLayout_ & DataStorage::dslQuadrupole) {
1043 >        idat.quadrupole1 = &(atomRowData.quadrupole[atom1]);
1044 >        idat.quadrupole2 = &(atomColData.quadrupole[atom2]);
1045 >      }
1046 >      
1047 >      if (storageLayout_ & DataStorage::dslDensity) {
1048 >        idat.rho1 = &(atomRowData.density[atom1]);
1049 >        idat.rho2 = &(atomColData.density[atom2]);
1050 >      }
1051 >      
1052 >      if (storageLayout_ & DataStorage::dslFunctional) {
1053 >        idat.frho1 = &(atomRowData.functional[atom1]);
1054 >        idat.frho2 = &(atomColData.functional[atom2]);
1055 >      }
1056 >      
1057 >      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1058 >        idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
1059 >        idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
1060 >      }
1061 >      
1062 >      if (storageLayout_ & DataStorage::dslParticlePot) {
1063 >        idat.particlePot1 = &(atomRowData.particlePot[atom1]);
1064 >        idat.particlePot2 = &(atomColData.particlePot[atom2]);
1065 >      }
1066 >      
1067 >      if (storageLayout_ & DataStorage::dslSkippedCharge) {              
1068 >        idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
1069 >        idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
1070 >      }
1071 >      
1072 >      if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1073 >        idat.flucQ1 = &(atomRowData.flucQPos[atom1]);
1074 >        idat.flucQ2 = &(atomColData.flucQPos[atom2]);
1075 >      }
1076 >      
1077 > #else
1078 >      
1079 >      idat.atid1 = idents[atom1];
1080 >      idat.atid2 = idents[atom2];
1081 >      
1082 >      if (regions[atom1] >= 0 && regions[atom2] >= 0) {
1083 >        idat.sameRegion = (regions[atom1] == regions[atom2]);
1084 >      } else {
1085 >        idat.sameRegion = false;
1086 >      }
1087 >      
1088 >      if (storageLayout_ & DataStorage::dslAmat) {
1089 >        idat.A1 = &(snap_->atomData.aMat[atom1]);
1090 >        idat.A2 = &(snap_->atomData.aMat[atom2]);
1091 >      }
1092 >      
1093 >      if (storageLayout_ & DataStorage::dslTorque) {
1094 >        idat.t1 = &(snap_->atomData.torque[atom1]);
1095 >        idat.t2 = &(snap_->atomData.torque[atom2]);
1096 >      }
1097 >      
1098 >      if (storageLayout_ & DataStorage::dslDipole) {
1099 >        idat.dipole1 = &(snap_->atomData.dipole[atom1]);
1100 >        idat.dipole2 = &(snap_->atomData.dipole[atom2]);
1101 >      }
1102 >      
1103 >      if (storageLayout_ & DataStorage::dslQuadrupole) {
1104 >        idat.quadrupole1 = &(snap_->atomData.quadrupole[atom1]);
1105 >        idat.quadrupole2 = &(snap_->atomData.quadrupole[atom2]);
1106 >      }
1107 >      
1108 >      if (storageLayout_ & DataStorage::dslDensity) {    
1109 >        idat.rho1 = &(snap_->atomData.density[atom1]);
1110 >        idat.rho2 = &(snap_->atomData.density[atom2]);
1111 >      }
1112 >      
1113 >      if (storageLayout_ & DataStorage::dslFunctional) {
1114 >        idat.frho1 = &(snap_->atomData.functional[atom1]);
1115 >        idat.frho2 = &(snap_->atomData.functional[atom2]);
1116 >      }
1117 >      
1118 >      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1119 >        idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
1120 >        idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
1121 >      }
1122 >      
1123 >      if (storageLayout_ & DataStorage::dslParticlePot) {
1124 >        idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
1125 >        idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
1126 >      }
1127 >      
1128 >      if (storageLayout_ & DataStorage::dslSkippedCharge) {
1129 >        idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
1130 >        idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
1131 >      }
1132 >      
1133 >      if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1134 >        idat.flucQ1 = &(snap_->atomData.flucQPos[atom1]);
1135 >        idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]);
1136 >      }
1137 > #endif
1138 >      
1139 >    } else {
1140 >      // atom1 is not new, so don't bother updating properties of that atom:
1141 > #ifdef IS_MPI
1142 >    idat.atid2 = identsCol[atom2];
1143 >
1144 >    if (regionsRow[atom1] >= 0 && regionsCol[atom2] >= 0) {
1145 >      idat.sameRegion = (regionsRow[atom1] == regionsCol[atom2]);
1146 >    } else {
1147 >      idat.sameRegion = false;
1148 >    }
1149 >
1150      if (storageLayout_ & DataStorage::dslAmat) {
832      idat.A1 = &(atomRowData.aMat[atom1]);
1151        idat.A2 = &(atomColData.aMat[atom2]);
1152      }
1153      
836    if (storageLayout_ & DataStorage::dslElectroFrame) {
837      idat.eFrame1 = &(atomRowData.electroFrame[atom1]);
838      idat.eFrame2 = &(atomColData.electroFrame[atom2]);
839    }
840
1154      if (storageLayout_ & DataStorage::dslTorque) {
842      idat.t1 = &(atomRowData.torque[atom1]);
1155        idat.t2 = &(atomColData.torque[atom2]);
1156      }
1157  
1158 +    if (storageLayout_ & DataStorage::dslDipole) {
1159 +      idat.dipole2 = &(atomColData.dipole[atom2]);
1160 +    }
1161 +
1162 +    if (storageLayout_ & DataStorage::dslQuadrupole) {
1163 +      idat.quadrupole2 = &(atomColData.quadrupole[atom2]);
1164 +    }
1165 +
1166      if (storageLayout_ & DataStorage::dslDensity) {
847      idat.rho1 = &(atomRowData.density[atom1]);
1167        idat.rho2 = &(atomColData.density[atom2]);
1168      }
1169  
1170      if (storageLayout_ & DataStorage::dslFunctional) {
852      idat.frho1 = &(atomRowData.functional[atom1]);
1171        idat.frho2 = &(atomColData.functional[atom2]);
1172      }
1173  
1174      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
857      idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
1175        idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
1176      }
1177  
1178      if (storageLayout_ & DataStorage::dslParticlePot) {
862      idat.particlePot1 = &(atomRowData.particlePot[atom1]);
1179        idat.particlePot2 = &(atomColData.particlePot[atom2]);
1180      }
1181  
1182      if (storageLayout_ & DataStorage::dslSkippedCharge) {              
867      idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
1183        idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
1184      }
1185  
1186 < #else
1186 >    if (storageLayout_ & DataStorage::dslFlucQPosition) {
1187 >      idat.flucQ2 = &(atomColData.flucQPos[atom2]);
1188 >    }
1189  
1190 <    idat.atypes = make_pair( ff_->getAtomType(idents[atom1]),
1191 <                             ff_->getAtomType(idents[atom2]) );
1190 > #else  
1191 >    idat.atid2 = idents[atom2];
1192  
1193 +    if (regions[atom1] >= 0 && regions[atom2] >= 0) {
1194 +      idat.sameRegion = (regions[atom1] == regions[atom2]);
1195 +    } else {
1196 +      idat.sameRegion = false;
1197 +    }
1198 +
1199      if (storageLayout_ & DataStorage::dslAmat) {
877      idat.A1 = &(snap_->atomData.aMat[atom1]);
1200        idat.A2 = &(snap_->atomData.aMat[atom2]);
1201      }
1202  
881    if (storageLayout_ & DataStorage::dslElectroFrame) {
882      idat.eFrame1 = &(snap_->atomData.electroFrame[atom1]);
883      idat.eFrame2 = &(snap_->atomData.electroFrame[atom2]);
884    }
885
1203      if (storageLayout_ & DataStorage::dslTorque) {
887      idat.t1 = &(snap_->atomData.torque[atom1]);
1204        idat.t2 = &(snap_->atomData.torque[atom2]);
1205      }
1206  
1207 +    if (storageLayout_ & DataStorage::dslDipole) {
1208 +      idat.dipole2 = &(snap_->atomData.dipole[atom2]);
1209 +    }
1210 +
1211 +    if (storageLayout_ & DataStorage::dslQuadrupole) {
1212 +      idat.quadrupole2 = &(snap_->atomData.quadrupole[atom2]);
1213 +    }
1214 +
1215      if (storageLayout_ & DataStorage::dslDensity) {    
892      idat.rho1 = &(snap_->atomData.density[atom1]);
1216        idat.rho2 = &(snap_->atomData.density[atom2]);
1217      }
1218  
1219      if (storageLayout_ & DataStorage::dslFunctional) {
897      idat.frho1 = &(snap_->atomData.functional[atom1]);
1220        idat.frho2 = &(snap_->atomData.functional[atom2]);
1221      }
1222  
1223      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
902      idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
1224        idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
1225      }
1226  
1227      if (storageLayout_ & DataStorage::dslParticlePot) {
907      idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
1228        idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
1229      }
1230  
1231      if (storageLayout_ & DataStorage::dslSkippedCharge) {
912      idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
1232        idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
1233      }
1234 +
1235 +    if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1236 +      idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]);
1237 +    }
1238 +
1239   #endif
1240 +    }
1241    }
917
1242    
1243 <  void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) {    
1243 >  void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat,
1244 >                                                       int atom1, int atom2) {  
1245   #ifdef IS_MPI
1246 <    pot_row[atom1] += 0.5 *  *(idat.pot);
1247 <    pot_col[atom2] += 0.5 *  *(idat.pot);
1246 >    pot_row[atom1] += RealType(0.5) *  *(idat.pot);
1247 >    pot_col[atom2] += RealType(0.5) *  *(idat.pot);
1248 >    expot_row[atom1] += RealType(0.5) *  *(idat.excludedPot);
1249 >    expot_col[atom2] += RealType(0.5) *  *(idat.excludedPot);
1250  
1251      atomRowData.force[atom1] += *(idat.f1);
1252      atomColData.force[atom2] -= *(idat.f1);
1253 +
1254 +    if (storageLayout_ & DataStorage::dslFlucQForce) {              
1255 +      atomRowData.flucQFrc[atom1] -= *(idat.dVdFQ1);
1256 +      atomColData.flucQFrc[atom2] -= *(idat.dVdFQ2);
1257 +    }
1258 +
1259 +    if (storageLayout_ & DataStorage::dslElectricField) {              
1260 +      atomRowData.electricField[atom1] += *(idat.eField1);
1261 +      atomColData.electricField[atom2] += *(idat.eField2);
1262 +    }
1263 +
1264 +    if (storageLayout_ & DataStorage::dslSitePotential) {              
1265 +      atomRowData.sitePotential[atom1] += *(idat.sPot1);
1266 +      atomColData.sitePotential[atom2] += *(idat.sPot2);
1267 +    }
1268 +
1269   #else
1270      pairwisePot += *(idat.pot);
1271 +    excludedPot += *(idat.excludedPot);
1272  
1273      snap_->atomData.force[atom1] += *(idat.f1);
1274      snap_->atomData.force[atom2] -= *(idat.f1);
1275 +
1276 +    if (idat.doParticlePot) {
1277 +      // This is the pairwise contribution to the particle pot.  The
1278 +      // embedding contribution is added in each of the low level
1279 +      // non-bonded routines.  In parallel, this calculation is done
1280 +      // in collectData, not in unpackInteractionData.
1281 +      snap_->atomData.particlePot[atom1] += *(idat.vpair) * *(idat.sw);
1282 +      snap_->atomData.particlePot[atom2] += *(idat.vpair) * *(idat.sw);
1283 +    }
1284 +    
1285 +    if (storageLayout_ & DataStorage::dslFlucQForce) {              
1286 +      snap_->atomData.flucQFrc[atom1] -= *(idat.dVdFQ1);
1287 +      snap_->atomData.flucQFrc[atom2] -= *(idat.dVdFQ2);
1288 +    }
1289 +
1290 +    if (storageLayout_ & DataStorage::dslElectricField) {              
1291 +      snap_->atomData.electricField[atom1] += *(idat.eField1);
1292 +      snap_->atomData.electricField[atom2] += *(idat.eField2);
1293 +    }
1294 +
1295 +    if (storageLayout_ & DataStorage::dslSitePotential) {              
1296 +      snap_->atomData.sitePotential[atom1] += *(idat.sPot1);
1297 +      snap_->atomData.sitePotential[atom2] += *(idat.sPot2);
1298 +    }
1299 +
1300   #endif
1301      
1302    }
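In the parallel branch of unpackInteractionData, each pair energy is booked half against the row copy of atom1 and half against the column copy of atom2, so the later row/column reduction (presumably in collectData) counts every interaction exactly once. A toy check of that bookkeeping, with hypothetical accumulator names:

  #include <cassert>
  #include <vector>

  int main() {
    // One interaction of energy 1.0 between row atom 0 and column atom 1.
    std::vector<double> pot_row(2, 0.0), pot_col(2, 0.0);
    double pairEnergy = 1.0;

    pot_row[0] += 0.5 * pairEnergy;  // row-indexed half
    pot_col[1] += 0.5 * pairEnergy;  // column-indexed half

    // Summing the reduced row and column accumulators recovers the
    // full pair energy with no double counting.
    double total = pot_row[0] + pot_row[1] + pot_col[0] + pot_col[1];
    assert(total == pairEnergy);
    return 0;
  }
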
# Line 935 | Line 1304 | namespace OpenMD {
1304    /*
1305     * buildNeighborList
1306     *
1307 <   * first element of pair is row-indexed CutoffGroup
1308 <   * second element of pair is column-indexed CutoffGroup
1309 <   */
1310 <  vector<pair<int, int> > ForceMatrixDecomposition::buildNeighborList() {
1311 <      
1312 <    vector<pair<int, int> > neighborList;
1313 <    groupCutoffs cuts;
1307 >   * Constructs the Verlet neighbor list for a force-matrix
1308 >   * decomposition.  In this case, each processor is responsible for
1309 >   * row-site interactions with column-sites.
1310 >   *
1311 >   * neighborList is returned as a packed array of neighboring
1312 >   * column-ordered CutoffGroups.  The starting position in
1313 >   * neighborList for each row-ordered CutoffGroup is given by the
1314 >   * returned vector point.
1315 >   */
1316 >  void ForceMatrixDecomposition::buildNeighborList(vector<int>& neighborList,
1317 >                                                   vector<int>& point) {
1318 >    neighborList.clear();
1319 >    point.clear();
1320 >    int len = 0;
1321 >    
1322      bool doAllPairs = false;
1323  
1324 +    Snapshot* snap_ = sman_->getCurrentSnapshot();
1325 +    Mat3x3d box;
1326 +    Mat3x3d invBox;
1327 +
1328 +    Vector3d rs, scaled, dr;
1329 +    Vector3i whichCell;
1330 +    int cellIndex;
1331 +
1332   #ifdef IS_MPI
1333      cellListRow_.clear();
1334      cellListCol_.clear();
1335 +    point.resize(nGroupsInRow_+1);
1336   #else
1337      cellList_.clear();
1338 +    point.resize(nGroups_+1);
1339   #endif
1340 +    
1341 +    if (!usePeriodicBoundaryConditions_) {
1342 +      box = snap_->getBoundingBox();
1343 +      invBox = snap_->getInvBoundingBox();
1344 +    } else {
1345 +      box = snap_->getHmat();
1346 +      invBox = snap_->getInvHmat();
1347 +    }
1348 +    
1349 +    Vector3d A = box.getColumn(0);
1350 +    Vector3d B = box.getColumn(1);
1351 +    Vector3d C = box.getColumn(2);
1352  
1353 <    RealType rList_ = (largestRcut_ + skinThickness_);
1354 <    RealType rl2 = rList_ * rList_;
1355 <    Snapshot* snap_ = sman_->getCurrentSnapshot();
1356 <    Mat3x3d Hmat = snap_->getHmat();
958 <    Vector3d Hx = Hmat.getColumn(0);
959 <    Vector3d Hy = Hmat.getColumn(1);
960 <    Vector3d Hz = Hmat.getColumn(2);
1353 >    // Required for triclinic cells
1354 >    Vector3d AxB = cross(A, B);
1355 >    Vector3d BxC = cross(B, C);
1356 >    Vector3d CxA = cross(C, A);
1357  
1358 <    nCells_.x() = (int) ( Hx.length() )/ rList_;
1359 <    nCells_.y() = (int) ( Hy.length() )/ rList_;
1360 <    nCells_.z() = (int) ( Hz.length() )/ rList_;
1358 >    // unit vectors perpendicular to the faces of the triclinic cell:
1359 >    AxB.normalize();
1360 >    BxC.normalize();
1361 >    CxA.normalize();
1362  
1363 <    // handle small boxes where the cell offsets can end up repeating cells
1363 >    // A set of perpendicular lengths in triclinic cells:
1364 >    RealType Wa = abs(dot(A, BxC));
1365 >    RealType Wb = abs(dot(B, CxA));
1366 >    RealType Wc = abs(dot(C, AxB));
1367      
1368 +    nCells_.x() = int( Wa / rList_ );
1369 +    nCells_.y() = int( Wb / rList_ );
1370 +    nCells_.z() = int( Wc / rList_ );
1371 +    
1372 +    // handle small boxes where the cell offsets can end up repeating cells
1373      if (nCells_.x() < 3) doAllPairs = true;
1374      if (nCells_.y() < 3) doAllPairs = true;
1375      if (nCells_.z() < 3) doAllPairs = true;
1376 <
972 <    Mat3x3d invHmat = snap_->getInvHmat();
973 <    Vector3d rs, scaled, dr;
974 <    Vector3i whichCell;
975 <    int cellIndex;
1376 >    
1377      int nCtot = nCells_.x() * nCells_.y() * nCells_.z();
1378 <
1378 >    
1379   #ifdef IS_MPI
1380      cellListRow_.resize(nCtot);
1381      cellListCol_.resize(nCtot);
1382   #else
1383      cellList_.resize(nCtot);
1384   #endif
1385 <
1385 >    
1386      if (!doAllPairs) {
1387 +      
1388   #ifdef IS_MPI
1389 <
1389 >      
1390        for (int i = 0; i < nGroupsInRow_; i++) {
1391          rs = cgRowData.position[i];
1392          
1393          // scaled positions relative to the box vectors
1394 <        scaled = invHmat * rs;
1394 >        scaled = invBox * rs;
1395          
1396          // wrap the vector back into the unit box by subtracting integer box
1397          // numbers
1398          for (int j = 0; j < 3; j++) {
1399            scaled[j] -= roundMe(scaled[j]);
1400            scaled[j] += 0.5;
1401 +          // Handle the special case when an object is exactly on the
1402 +          // boundary (a scaled coordinate of 1.0 is the same as
1403 +          // scaled coordinate of 0.0)
1404 +          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1405          }
1406          
1407          // find xyz-indices of cell that cutoffGroup is in.
# Line 1009 | Line 1415 | namespace OpenMD {
1415          // add this cutoff group to the list of groups in this cell;
1416          cellListRow_[cellIndex].push_back(i);
1417        }
1012      
1418        for (int i = 0; i < nGroupsInCol_; i++) {
1419          rs = cgColData.position[i];
1420          
1421          // scaled positions relative to the box vectors
1422 <        scaled = invHmat * rs;
1422 >        scaled = invBox * rs;
1423          
1424          // wrap the vector back into the unit box by subtracting integer box
1425          // numbers
1426          for (int j = 0; j < 3; j++) {
1427            scaled[j] -= roundMe(scaled[j]);
1428            scaled[j] += 0.5;
1429 +          // Handle the special case when an object is exactly on the
1430 +          // boundary (a scaled coordinate of 1.0 is the same as
1431 +          // scaled coordinate of 0.0)
1432 +          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1433          }
1434          
1435          // find xyz-indices of cell that cutoffGroup is in.
# Line 1034 | Line 1443 | namespace OpenMD {
1443          // add this cutoff group to the list of groups in this cell;
1444          cellListCol_[cellIndex].push_back(i);
1445        }
1446 +            
1447   #else
1448        for (int i = 0; i < nGroups_; i++) {
1449          rs = snap_->cgData.position[i];
1450          
1451          // scaled positions relative to the box vectors
1452 <        scaled = invHmat * rs;
1452 >        scaled = invBox * rs;
1453          
1454          // wrap the vector back into the unit box by subtracting integer box
1455          // numbers
1456          for (int j = 0; j < 3; j++) {
1457            scaled[j] -= roundMe(scaled[j]);
1458            scaled[j] += 0.5;
1459 +          // Handle the special case when an object is exactly on the
1460 +          // boundary (a scaled coordinate of 1.0 is the same as
1461 +          // scaled coordinate of 0.0)
1462 +          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1463          }
1464          
1465          // find xyz-indices of cell that cutoffGroup is in.
1466 <        whichCell.x() = nCells_.x() * scaled.x();
1467 <        whichCell.y() = nCells_.y() * scaled.y();
1468 <        whichCell.z() = nCells_.z() * scaled.z();
1466 >        whichCell.x() = int(nCells_.x() * scaled.x());
1467 >        whichCell.y() = int(nCells_.y() * scaled.y());
1468 >        whichCell.z() = int(nCells_.z() * scaled.z());
1469          
1470          // find single index of this cell:
1471 <        cellIndex = Vlinear(whichCell, nCells_);      
1471 >        cellIndex = Vlinear(whichCell, nCells_);
1472          
1473          // add this cutoff group to the list of groups in this cell;
1474          cellList_[cellIndex].push_back(i);
1475        }
1476 +
1477   #endif
1478  
1064      for (int m1z = 0; m1z < nCells_.z(); m1z++) {
1065        for (int m1y = 0; m1y < nCells_.y(); m1y++) {
1066          for (int m1x = 0; m1x < nCells_.x(); m1x++) {
1067            Vector3i m1v(m1x, m1y, m1z);
1068            int m1 = Vlinear(m1v, nCells_);
1069            
1070            for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1071                 os != cellOffsets_.end(); ++os) {
1072              
1073              Vector3i m2v = m1v + (*os);
1074              
1075              if (m2v.x() >= nCells_.x()) {
1076                m2v.x() = 0;          
1077              } else if (m2v.x() < 0) {
1078                m2v.x() = nCells_.x() - 1;
1079              }
1080              
1081              if (m2v.y() >= nCells_.y()) {
1082                m2v.y() = 0;          
1083              } else if (m2v.y() < 0) {
1084                m2v.y() = nCells_.y() - 1;
1085              }
1086              
1087              if (m2v.z() >= nCells_.z()) {
1088                m2v.z() = 0;          
1089              } else if (m2v.z() < 0) {
1090                m2v.z() = nCells_.z() - 1;
1091              }
1092              
1093              int m2 = Vlinear (m2v, nCells_);
1094              
1479   #ifdef IS_MPI
1480 <              for (vector<int>::iterator j1 = cellListRow_[m1].begin();
1481 <                   j1 != cellListRow_[m1].end(); ++j1) {
1098 <                for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1099 <                     j2 != cellListCol_[m2].end(); ++j2) {
1100 <                  
1101 <                  // Always do this if we're in different cells or if
1102 <                  // we're in the same cell and the global index of the
1103 <                  // j2 cutoff group is less than the j1 cutoff group
1104 <                  
1105 <                  if (m2 != m1 || cgColToGlobal[(*j2)] < cgRowToGlobal[(*j1)]) {
1106 <                    dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)];
1107 <                    snap_->wrapVector(dr);
1108 <                    cuts = getGroupCutoffs( (*j1), (*j2) );
1109 <                    if (dr.lengthSquare() < cuts.third) {
1110 <                      neighborList.push_back(make_pair((*j1), (*j2)));
1111 <                    }
1112 <                  }
1113 <                }
1114 <              }
1480 >      for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1481 >        rs = cgRowData.position[j1];
1482   #else
1483 +
1484 +      for (int j1 = 0; j1 < nGroups_; j1++) {
1485 +        rs = snap_->cgData.position[j1];
1486 + #endif
1487 +        point[j1] = len;
1488 +        
1489 +        // scaled positions relative to the box vectors
1490 +        scaled = invBox * rs;
1491 +        
1492 +        // wrap the vector back into the unit box by subtracting integer box
1493 +        // numbers
1494 +        for (int j = 0; j < 3; j++) {
1495 +          scaled[j] -= roundMe(scaled[j]);
1496 +          scaled[j] += 0.5;
1497 +          // Handle the special case when an object is exactly on the
1498 +          // boundary (a scaled coordinate of 1.0 is the same as
1499 +          // scaled coordinate of 0.0)
1500 +          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1501 +        }
1502 +        
1503 +        // find xyz-indices of cell that cutoffGroup is in.
1504 +        whichCell.x() = nCells_.x() * scaled.x();
1505 +        whichCell.y() = nCells_.y() * scaled.y();
1506 +        whichCell.z() = nCells_.z() * scaled.z();
1507 +        
1508 +        // find single index of this cell:
1509 +        int m1 = Vlinear(whichCell, nCells_);
1510 +
1511 +        for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1512 +             os != cellOffsets_.end(); ++os) {
1513                
1514 <              for (vector<int>::iterator j1 = cellList_[m1].begin();
1515 <                   j1 != cellList_[m1].end(); ++j1) {
1516 <                for (vector<int>::iterator j2 = cellList_[m2].begin();
1517 <                     j2 != cellList_[m2].end(); ++j2) {
1518 <                  
1519 <                  // Always do this if we're in different cells or if
1520 <                  // we're in the same cell and the global index of the
1521 <                  // j2 cutoff group is less than the j1 cutoff group
1522 <                  
1523 <                  if (m2 != m1 || (*j2) < (*j1)) {
1524 <                    dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
1525 <                    snap_->wrapVector(dr);
1526 <                    cuts = getGroupCutoffs( (*j1), (*j2) );
1527 <                    if (dr.lengthSquare() < cuts.third) {
1528 <                      neighborList.push_back(make_pair((*j1), (*j2)));
1529 <                    }
1530 <                  }
1531 <                }
1514 >          Vector3i m2v = whichCell + (*os);
1515 >
1516 >          if (m2v.x() >= nCells_.x()) {
1517 >            m2v.x() = 0;          
1518 >          } else if (m2v.x() < 0) {
1519 >            m2v.x() = nCells_.x() - 1;
1520 >          }
1521 >          
1522 >          if (m2v.y() >= nCells_.y()) {
1523 >            m2v.y() = 0;          
1524 >          } else if (m2v.y() < 0) {
1525 >            m2v.y() = nCells_.y() - 1;
1526 >          }
1527 >          
1528 >          if (m2v.z() >= nCells_.z()) {
1529 >            m2v.z() = 0;          
1530 >          } else if (m2v.z() < 0) {
1531 >            m2v.z() = nCells_.z() - 1;
1532 >          }
1533 >          int m2 = Vlinear (m2v, nCells_);                                      
1534 > #ifdef IS_MPI
1535 >          for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1536 >               j2 != cellListCol_[m2].end(); ++j2) {
1537 >            
1538 >            // In parallel, we need to visit *all* pairs of row
1539 >            // & column indices and will divide labor in the
1540 >            // force evaluation later.
1541 >            dr = cgColData.position[(*j2)] - rs;
1542 >            if (usePeriodicBoundaryConditions_) {
1543 >              snap_->wrapVector(dr);
1544 >            }
1545 >            if (dr.lengthSquare() < rListSq_) {
1546 >              neighborList.push_back( (*j2) );
1547 >              ++len;
1548 >            }                
1549 >          }        
1550 > #else
1551 >          for (vector<int>::iterator j2 = cellList_[m2].begin();
1552 >               j2 != cellList_[m2].end(); ++j2) {
1553 >          
1554 >            // Always do this if we're in different cells or if
1555 >            // we're in the same cell and the global index of
1556 >            // the j2 cutoff group is greater than or equal to
1557 >            // the j1 cutoff group.  Note that Rappaport's code
1558 >            // has a "less than" conditional here, but that
1559 >            // deals with atom-by-atom computation.  OpenMD
1560 >            // allows atoms within a single cutoff group to
1561 >            // interact with each other.
1562 >            
1563 >            if ( (*j2) >= j1 ) {
1564 >              
1565 >              dr = snap_->cgData.position[(*j2)] - rs;
1566 >              if (usePeriodicBoundaryConditions_) {
1567 >                snap_->wrapVector(dr);
1568                }
1569 < #endif
1569 >              if ( dr.lengthSquare() < rListSq_) {
1570 >                neighborList.push_back( (*j2) );
1571 >                ++len;
1572 >              }
1573              }
1574 <          }
1574 >          }                
1575 > #endif
1576          }
1577 <      }
1577 >      }      
1578      } else {
1579        // branch to do all cutoff group pairs
1580   #ifdef IS_MPI
1581        for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1582 <        for (int j2 = 0; j2 < nGroupsInCol_; j2++) {      
1583 <          dr = cgColData.position[j2] - cgRowData.position[j1];
1584 <          snap_->wrapVector(dr);
1585 <          cuts = getGroupCutoffs( j1, j2 );
1586 <          if (dr.lengthSquare() < cuts.third) {
1587 <            neighborList.push_back(make_pair(j1, j2));
1582 >        point[j1] = len;
1583 >        rs = cgRowData.position[j1];
1584 >        for (int j2 = 0; j2 < nGroupsInCol_; j2++) {    
1585 >          dr = cgColData.position[j2] - rs;
1586 >          if (usePeriodicBoundaryConditions_) {
1587 >            snap_->wrapVector(dr);
1588            }
1589 +          if (dr.lengthSquare() < rListSq_) {
1590 +            neighborList.push_back( j2 );
1591 +            ++len;
1592 +          }
1593          }
1594 <      }
1594 >      }      
1595   #else
1596 <      for (int j1 = 0; j1 < nGroups_ - 1; j1++) {
1597 <        for (int j2 = j1 + 1; j2 < nGroups_; j2++) {
1598 <          dr = snap_->cgData.position[j2] - snap_->cgData.position[j1];
1599 <          snap_->wrapVector(dr);
1600 <          cuts = getGroupCutoffs( j1, j2 );
1601 <          if (dr.lengthSquare() < cuts.third) {
1602 <            neighborList.push_back(make_pair(j1, j2));
1596 >      // include all groups here.
1597 >      for (int j1 = 0; j1 < nGroups_; j1++) {
1598 >        point[j1] = len;
1599 >        rs = snap_->cgData.position[j1];
1600 >        // include self group interactions j2 == j1
1601 >        for (int j2 = j1; j2 < nGroups_; j2++) {
1602 >          dr = snap_->cgData.position[j2] - rs;
1603 >          if (usePeriodicBoundaryConditions_) {
1604 >            snap_->wrapVector(dr);
1605            }
1606 <        }
1607 <      }        
1606 >          if (dr.lengthSquare() < rListSq_) {
1607 >            neighborList.push_back( j2 );
1608 >            ++len;
1609 >          }
1610 >        }    
1611 >      }
1612   #endif
1613      }
1614 <      
1614 >
1615 > #ifdef IS_MPI
1616 >    point[nGroupsInRow_] = len;
1617 > #else
1618 >    point[nGroups_] = len;
1619 > #endif
1620 >  
1621      // save the local cutoff group positions for the check that is
1622      // done on each loop:
1623      saved_CG_positions_.clear();
1624 +    saved_CG_positions_.reserve(nGroups_);
1625      for (int i = 0; i < nGroups_; i++)
1626        saved_CG_positions_.push_back(snap_->cgData.position[i]);
1173    
1174    return neighborList;
1627    }
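buildNeighborList above sizes the cell grid from the perpendicular widths of the (possibly triclinic) box: Wa is the projection of A onto the unit normal of the B-C face, and likewise for Wb and Wc. A self-contained sketch of that computation, with plain arrays standing in for OpenMD's Vector3d:

  #include <cmath>
  #include <cstdio>

  // 3-vector helpers standing in for OpenMD's Vector3d.
  static void cross(const double a[3], const double b[3], double c[3]) {
    c[0] = a[1]*b[2] - a[2]*b[1];
    c[1] = a[2]*b[0] - a[0]*b[2];
    c[2] = a[0]*b[1] - a[1]*b[0];
  }
  static double dot(const double a[3], const double b[3]) {
    return a[0]*b[0] + a[1]*b[1] + a[2]*b[2];
  }

  // Perpendicular width of the cell along A: |A . (B x C)| / |B x C|.
  static double perpWidth(const double A[3], const double B[3],
                          const double C[3]) {
    double n[3];
    cross(B, C, n);
    return std::fabs(dot(A, n)) / std::sqrt(dot(n, n));
  }

  int main() {
    // A mildly sheared box; for an orthorhombic box the perpendicular
    // widths reduce to the ordinary edge lengths.
    double A[3] = {40.0,  0.0,  0.0};
    double B[3] = { 6.0, 36.0,  0.0};
    double C[3] = { 0.0,  4.0, 50.0};
    double rList = 12.0;

    int nx = int(perpWidth(A, B, C) / rList);
    int ny = int(perpWidth(B, C, A) / rList);
    int nz = int(perpWidth(C, A, B) / rList);
    std::printf("cells: %d x %d x %d\n", nx, ny, nz);
    return 0;
  }
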
1628   } //end namespace OpenMD
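The neighborList/point pair produced by buildNeighborList is a compressed-row layout: the column-group neighbors of row group j1 occupy neighborList[point[j1]] through neighborList[point[j1+1] - 1]. A hedged sketch of how a caller might walk it (the per-pair work is left as a placeholder):

  #include <vector>

  // Counts pairs by walking a packed neighbor list in the layout produced
  // by buildNeighborList: point has one more entry than the number of row
  // groups and brackets each group's slice of neighborList.
  int countPairs(const std::vector<int>& neighborList,
                 const std::vector<int>& point) {
    int nPairs = 0;
    int nGroups = int(point.size()) - 1;
    for (int j1 = 0; j1 < nGroups; j1++) {
      for (int k = point[j1]; k < point[j1 + 1]; k++) {
        int j2 = neighborList[k];  // column-ordered (or local) neighbor of j1
        (void)j2;                  // ... evaluate the (j1, j2) interaction here ...
        ++nPairs;
      }
    }
    return nPairs;
  }
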

Diff Legend

Removed lines
+ Added lines
< Changed lines
> Changed lines