root/OpenMD/trunk/src/parallel/ForceMatrixDecomposition.cpp

Comparing:
branches/development/src/parallel/ForceDecomposition.cpp (file contents), Revision 1547 by gezelter, Mon Apr 11 18:44:16 2011 UTC vs.
trunk/src/parallel/ForceMatrixDecomposition.cpp (file contents), Revision 2071 by gezelter, Sat Mar 7 21:41:51 2015 UTC

# Line 35 | Line 35
35   *                                                                      
36   * [1]  Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).            
37   * [2]  Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).          
38 < * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).          
39 < * [4]  Vardeman & Gezelter, in progress (2009).                        
38 > * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008).          
39 > * [4]  Kuang & Gezelter,  J. Chem. Phys. 133, 164101 (2010).
40 > * [5]  Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
41   */
42 < #include "parallel/ForceDecomposition.hpp"
42 > #include "parallel/ForceMatrixDecomposition.hpp"
43   #include "math/SquareMatrix3.hpp"
44   #include "nonbonded/NonBondedInteraction.hpp"
45   #include "brains/SnapshotManager.hpp"
46 + #include "brains/PairList.hpp"
47  
48   using namespace std;
49   namespace OpenMD {
50  
51 +  ForceMatrixDecomposition::ForceMatrixDecomposition(SimInfo* info, InteractionManager* iMan) : ForceDecomposition(info, iMan) {
52 +
53 +    // Row and column scans must visit all surrounding cells
54 +    cellOffsets_.clear();
55 +    cellOffsets_.push_back( Vector3i(-1,-1,-1) );
56 +    cellOffsets_.push_back( Vector3i( 0,-1,-1) );
57 +    cellOffsets_.push_back( Vector3i( 1,-1,-1) );                          
58 +    cellOffsets_.push_back( Vector3i(-1, 0,-1) );
59 +    cellOffsets_.push_back( Vector3i( 0, 0,-1) );
60 +    cellOffsets_.push_back( Vector3i( 1, 0,-1) );
61 +    cellOffsets_.push_back( Vector3i(-1, 1,-1) );
62 +    cellOffsets_.push_back( Vector3i( 0, 1,-1) );      
63 +    cellOffsets_.push_back( Vector3i( 1, 1,-1) );
64 +    cellOffsets_.push_back( Vector3i(-1,-1, 0) );
65 +    cellOffsets_.push_back( Vector3i( 0,-1, 0) );
66 +    cellOffsets_.push_back( Vector3i( 1,-1, 0) );
67 +    cellOffsets_.push_back( Vector3i(-1, 0, 0) );      
68 +    cellOffsets_.push_back( Vector3i( 0, 0, 0) );
69 +    cellOffsets_.push_back( Vector3i( 1, 0, 0) );
70 +    cellOffsets_.push_back( Vector3i(-1, 1, 0) );
71 +    cellOffsets_.push_back( Vector3i( 0, 1, 0) );
72 +    cellOffsets_.push_back( Vector3i( 1, 1, 0) );
73 +    cellOffsets_.push_back( Vector3i(-1,-1, 1) );
74 +    cellOffsets_.push_back( Vector3i( 0,-1, 1) );
75 +    cellOffsets_.push_back( Vector3i( 1,-1, 1) );
76 +    cellOffsets_.push_back( Vector3i(-1, 0, 1) );
77 +    cellOffsets_.push_back( Vector3i( 0, 0, 1) );
78 +    cellOffsets_.push_back( Vector3i( 1, 0, 1) );
79 +    cellOffsets_.push_back( Vector3i(-1, 1, 1) );
80 +    cellOffsets_.push_back( Vector3i( 0, 1, 1) );
81 +    cellOffsets_.push_back( Vector3i( 1, 1, 1) );
82 +  }
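
The 27 offsets above are every Vector3i(i,j,k) with i, j and k in {-1, 0, 1}: the cell itself plus its 26 neighbors. A minimal equivalent sketch, assuming the same cellOffsets_ and Vector3i used above:

    cellOffsets_.clear();
    for (int k = -1; k <= 1; k++)
      for (int j = -1; j <= 1; j++)
        for (int i = -1; i <= 1; i++)
          cellOffsets_.push_back( Vector3i(i, j, k) );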
83 +
84 +
85    /**
86     * distributeInitialData is essentially a copy of the older fortran
87     * SimulationSetup
88     */
89 <  
90 <  void ForceDecomposition::distributeInitialData() {
91 < #ifdef IS_MPI    
92 <    Snapshot* snap = sman_->getCurrentSnapshot();
93 <    int nLocal = snap->getNumberOfAtoms();
94 <    int nGroups = snap->getNumberOfCutoffGroups();
89 >  void ForceMatrixDecomposition::distributeInitialData() {
90 >    snap_ = sman_->getCurrentSnapshot();
91 >    storageLayout_ = sman_->getStorageLayout();
92 >    ff_ = info_->getForceField();
93 >    nLocal_ = snap_->getNumberOfAtoms();
94 >  
95 >    nGroups_ = info_->getNLocalCutoffGroups();
96 >    // gather the information for atomtype IDs (atids):
97 >    idents = info_->getIdentArray();
98 >    regions = info_->getRegions();
99 >    AtomLocalToGlobal = info_->getGlobalAtomIndices();
100 >    cgLocalToGlobal = info_->getGlobalGroupIndices();
101 >    vector<int> globalGroupMembership = info_->getGlobalGroupMembership();
102  
103 <    AtomCommIntI = new Communicator<Row,int>(nLocal);
61 <    AtomCommRealI = new Communicator<Row,RealType>(nLocal);
62 <    AtomCommVectorI = new Communicator<Row,Vector3d>(nLocal);
63 <    AtomCommMatrixI = new Communicator<Row,Mat3x3d>(nLocal);
103 >    massFactors = info_->getMassFactors();
104  
105 <    AtomCommIntJ = new Communicator<Column,int>(nLocal);
106 <    AtomCommRealJ = new Communicator<Column,RealType>(nLocal);
107 <    AtomCommVectorJ = new Communicator<Column,Vector3d>(nLocal);
108 <    AtomCommMatrixJ = new Communicator<Column,Mat3x3d>(nLocal);
105 >    PairList* excludes = info_->getExcludedInteractions();
106 >    PairList* oneTwo = info_->getOneTwoInteractions();
107 >    PairList* oneThree = info_->getOneThreeInteractions();
108 >    PairList* oneFour = info_->getOneFourInteractions();
109 >    
110 >    if (needVelocities_)
111 >      snap_->cgData.setStorageLayout(DataStorage::dslPosition |
112 >                                     DataStorage::dslVelocity);
113 >    else
114 >      snap_->cgData.setStorageLayout(DataStorage::dslPosition);
115 >    
116 > #ifdef IS_MPI
117 >
118 >    MPI_Comm row = rowComm.getComm();
119 >    MPI_Comm col = colComm.getComm();
120  
121 <    cgCommIntI = new Communicator<Row,int>(nGroups);
122 <    cgCommVectorI = new Communicator<Row,Vector3d>(nGroups);
123 <    cgCommIntJ = new Communicator<Column,int>(nGroups);
124 <    cgCommVectorJ = new Communicator<Column,Vector3d>(nGroups);
121 >    AtomPlanIntRow = new Plan<int>(row, nLocal_);
122 >    AtomPlanRealRow = new Plan<RealType>(row, nLocal_);
123 >    AtomPlanVectorRow = new Plan<Vector3d>(row, nLocal_);
124 >    AtomPlanMatrixRow = new Plan<Mat3x3d>(row, nLocal_);
125 >    AtomPlanPotRow = new Plan<potVec>(row, nLocal_);
126  
127 <    int nAtomsInRow = AtomCommIntI->getSize();
128 <    int nAtomsInCol = AtomCommIntJ->getSize();
129 <    int nGroupsInRow = cgCommIntI->getSize();
130 <    int nGroupsInCol = cgCommIntJ->getSize();
127 >    AtomPlanIntColumn = new Plan<int>(col, nLocal_);
128 >    AtomPlanRealColumn = new Plan<RealType>(col, nLocal_);
129 >    AtomPlanVectorColumn = new Plan<Vector3d>(col, nLocal_);
130 >    AtomPlanMatrixColumn = new Plan<Mat3x3d>(col, nLocal_);
131 >    AtomPlanPotColumn = new Plan<potVec>(col, nLocal_);
132  
133 <    vector<vector<RealType> > pot_row(N_INTERACTION_FAMILIES,
134 <                                      vector<RealType> (nAtomsInRow, 0.0));
135 <    vector<vector<RealType> > pot_col(N_INTERACTION_FAMILIES,
136 <                                      vector<RealType> (nAtomsInCol, 0.0));
133 >    cgPlanIntRow = new Plan<int>(row, nGroups_);
134 >    cgPlanVectorRow = new Plan<Vector3d>(row, nGroups_);
135 >    cgPlanIntColumn = new Plan<int>(col, nGroups_);
136 >    cgPlanVectorColumn = new Plan<Vector3d>(col, nGroups_);
137 >
138 >    nAtomsInRow_ = AtomPlanIntRow->getSize();
139 >    nAtomsInCol_ = AtomPlanIntColumn->getSize();
140 >    nGroupsInRow_ = cgPlanIntRow->getSize();
141 >    nGroupsInCol_ = cgPlanIntColumn->getSize();
142 >
143 >    // Modify the data storage objects with the correct layouts and sizes:
144 >    atomRowData.resize(nAtomsInRow_);
145 >    atomRowData.setStorageLayout(storageLayout_);
146 >    atomColData.resize(nAtomsInCol_);
147 >    atomColData.setStorageLayout(storageLayout_);
148 >    cgRowData.resize(nGroupsInRow_);
149 >    cgRowData.setStorageLayout(DataStorage::dslPosition);
150 >    cgColData.resize(nGroupsInCol_);
151 >    if (needVelocities_)
152 >      // column velocities are only stored when velocities are required.
153 >      cgColData.setStorageLayout(DataStorage::dslPosition |
154 >                                 DataStorage::dslVelocity);
155 >    else    
156 >      cgColData.setStorageLayout(DataStorage::dslPosition);
157 >      
158 >    identsRow.resize(nAtomsInRow_);
159 >    identsCol.resize(nAtomsInCol_);
160      
161 <    vector<RealType> pot_local(N_INTERACTION_FAMILIES, 0.0);
161 >    AtomPlanIntRow->gather(idents, identsRow);
162 >    AtomPlanIntColumn->gather(idents, identsCol);
163  
164 <    // gather the information for atomtype IDs (atids):
165 <    vector<int> identsLocal = info_->getIdentArray();
166 <    identsRow.reserve(nAtomsInRow);
167 <    identsCol.reserve(nAtomsInCol);
164 >    regionsRow.resize(nAtomsInRow_);
165 >    regionsCol.resize(nAtomsInCol_);
166 >    
167 >    AtomPlanIntRow->gather(regions, regionsRow);
168 >    AtomPlanIntColumn->gather(regions, regionsCol);
169 >    
170 >    // allocate memory for the parallel objects
171 >    atypesRow.resize(nAtomsInRow_);
172 >    atypesCol.resize(nAtomsInCol_);
173  
174 <    AtomCommIntI->gather(identsLocal, identsRow);
175 <    AtomCommIntJ->gather(identsLocal, identsCol);
174 >    for (int i = 0; i < nAtomsInRow_; i++)
175 >      atypesRow[i] = ff_->getAtomType(identsRow[i]);
176 >    for (int i = 0; i < nAtomsInCol_; i++)
177 >      atypesCol[i] = ff_->getAtomType(identsCol[i]);        
178  
179 <    AtomLocalToGlobal = info_->getLocalToGlobalAtomIndex();
180 <    AtomCommIntI->gather(AtomLocalToGlobal, AtomRowToGlobal);
97 <    AtomCommIntJ->gather(AtomLocalToGlobal, AtomColToGlobal);
179 >    pot_row.resize(nAtomsInRow_);
180 >    pot_col.resize(nAtomsInCol_);
181  
182 <    cgLocalToGlobal = info_->getLocalToGlobalCutoffGroupIndex();
183 <    cgCommIntI->gather(cgLocalToGlobal, cgRowToGlobal);
101 <    cgCommIntJ->gather(cgLocalToGlobal, cgColToGlobal);
182 >    expot_row.resize(nAtomsInRow_);
183 >    expot_col.resize(nAtomsInCol_);
184  
185 <    
185 >    AtomRowToGlobal.resize(nAtomsInRow_);
186 >    AtomColToGlobal.resize(nAtomsInCol_);
187 >    AtomPlanIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
188 >    AtomPlanIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);
189  
190 <    // still need:
191 <    // topoDist
192 <    // exclude
190 >    cgRowToGlobal.resize(nGroupsInRow_);
191 >    cgColToGlobal.resize(nGroupsInCol_);
192 >    cgPlanIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
193 >    cgPlanIntColumn->gather(cgLocalToGlobal, cgColToGlobal);
194 >
195 >    massFactorsRow.resize(nAtomsInRow_);
196 >    massFactorsCol.resize(nAtomsInCol_);
197 >    AtomPlanRealRow->gather(massFactors, massFactorsRow);
198 >    AtomPlanRealColumn->gather(massFactors, massFactorsCol);
199 >
200 >    groupListRow_.clear();
201 >    groupListRow_.resize(nGroupsInRow_);
202 >    for (int i = 0; i < nGroupsInRow_; i++) {
203 >      int gid = cgRowToGlobal[i];
204 >      for (int j = 0; j < nAtomsInRow_; j++) {
205 >        int aid = AtomRowToGlobal[j];
206 >        if (globalGroupMembership[aid] == gid)
207 >          groupListRow_[i].push_back(j);
208 >      }      
209 >    }
210 >
211 >    groupListCol_.clear();
212 >    groupListCol_.resize(nGroupsInCol_);
213 >    for (int i = 0; i < nGroupsInCol_; i++) {
214 >      int gid = cgColToGlobal[i];
215 >      for (int j = 0; j < nAtomsInCol_; j++) {
216 >        int aid = AtomColToGlobal[j];
217 >        if (globalGroupMembership[aid] == gid)
218 >          groupListCol_[i].push_back(j);
219 >      }      
220 >    }
221 >
222 >    excludesForAtom.clear();
223 >    excludesForAtom.resize(nAtomsInRow_);
224 >    toposForAtom.clear();
225 >    toposForAtom.resize(nAtomsInRow_);
226 >    topoDist.clear();
227 >    topoDist.resize(nAtomsInRow_);
228 >    for (int i = 0; i < nAtomsInRow_; i++) {
229 >      int iglob = AtomRowToGlobal[i];
230 >
231 >      for (int j = 0; j < nAtomsInCol_; j++) {
232 >        int jglob = AtomColToGlobal[j];
233 >
234 >        if (excludes->hasPair(iglob, jglob))
235 >          excludesForAtom[i].push_back(j);      
236 >        
237 >        if (oneTwo->hasPair(iglob, jglob)) {
238 >          toposForAtom[i].push_back(j);
239 >          topoDist[i].push_back(1);
240 >        } else {
241 >          if (oneThree->hasPair(iglob, jglob)) {
242 >            toposForAtom[i].push_back(j);
243 >            topoDist[i].push_back(2);
244 >          } else {
245 >            if (oneFour->hasPair(iglob, jglob)) {
246 >              toposForAtom[i].push_back(j);
247 >              topoDist[i].push_back(3);
248 >            }
249 >          }
250 >        }
251 >      }      
252 >    }
253 >
254 > #else
255 >    excludesForAtom.clear();
256 >    excludesForAtom.resize(nLocal_);
257 >    toposForAtom.clear();
258 >    toposForAtom.resize(nLocal_);
259 >    topoDist.clear();
260 >    topoDist.resize(nLocal_);
261 >
262 >    for (int i = 0; i < nLocal_; i++) {
263 >      int iglob = AtomLocalToGlobal[i];
264 >
265 >      for (int j = 0; j < nLocal_; j++) {
266 >        int jglob = AtomLocalToGlobal[j];
267 >
268 >        if (excludes->hasPair(iglob, jglob))
269 >          excludesForAtom[i].push_back(j);              
270 >        
271 >        if (oneTwo->hasPair(iglob, jglob)) {
272 >          toposForAtom[i].push_back(j);
273 >          topoDist[i].push_back(1);
274 >        } else {
275 >          if (oneThree->hasPair(iglob, jglob)) {
276 >            toposForAtom[i].push_back(j);
277 >            topoDist[i].push_back(2);
278 >          } else {
279 >            if (oneFour->hasPair(iglob, jglob)) {
280 >              toposForAtom[i].push_back(j);
281 >              topoDist[i].push_back(3);
282 >            }
283 >          }
284 >        }
285 >      }      
286 >    }
287   #endif
288 +
289 +    // allocate memory for the parallel objects
290 +    atypesLocal.resize(nLocal_);
291 +
292 +    for (int i = 0; i < nLocal_; i++)
293 +      atypesLocal[i] = ff_->getAtomType(idents[i]);
294 +
295 +    groupList_.clear();
296 +    groupList_.resize(nGroups_);
297 +    for (int i = 0; i < nGroups_; i++) {
298 +      int gid = cgLocalToGlobal[i];
299 +      for (int j = 0; j < nLocal_; j++) {
300 +        int aid = AtomLocalToGlobal[j];
301 +        if (globalGroupMembership[aid] == gid) {
302 +          groupList_[i].push_back(j);
303 +        }
304 +      }      
305 +    }    
306    }
307      
308 +  int ForceMatrixDecomposition::getTopologicalDistance(int atom1, int atom2) {
309 +    for (unsigned int j = 0; j < toposForAtom[atom1].size(); j++) {
310 +      if (toposForAtom[atom1][j] == atom2)
311 +        return topoDist[atom1][j];
312 +    }                                          
313 +    return 0;
314 +  }
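
A hedged usage sketch for getTopologicalDistance: a caller maps the returned distance (1 for 1-2, 2 for 1-3, 3 for 1-4 pairs, 0 otherwise, as built in distributeInitialData) to a scale factor. The function name and scale values below are illustrative placeholders, not OpenMD defaults:

    RealType exampleScale(ForceMatrixDecomposition& decomp, int atom1, int atom2) {
      switch (decomp.getTopologicalDistance(atom1, atom2)) {
      case 1:  return 0.0;   // 1-2 (directly bonded) pair
      case 2:  return 0.0;   // 1-3 pair
      case 3:  return 0.5;   // 1-4 pair, often partially scaled
      default: return 1.0;   // topologically unrelated pair
      }
    }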
315  
316 +  void ForceMatrixDecomposition::zeroWorkArrays() {
317 +    pairwisePot = 0.0;
318 +    embeddingPot = 0.0;
319 +    excludedPot = 0.0;
320 +    excludedSelfPot = 0.0;
321  
113  void ForceDecomposition::distributeData()  {
322   #ifdef IS_MPI
323 <    Snapshot* snap = sman_->getCurrentSnapshot();
323 >    if (storageLayout_ & DataStorage::dslForce) {
324 >      fill(atomRowData.force.begin(), atomRowData.force.end(), V3Zero);
325 >      fill(atomColData.force.begin(), atomColData.force.end(), V3Zero);
326 >    }
327 >
328 >    if (storageLayout_ & DataStorage::dslTorque) {
329 >      fill(atomRowData.torque.begin(), atomRowData.torque.end(), V3Zero);
330 >      fill(atomColData.torque.begin(), atomColData.torque.end(), V3Zero);
331 >    }
332 >    
333 >    fill(pot_row.begin(), pot_row.end(),
334 >         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
335 >
336 >    fill(pot_col.begin(), pot_col.end(),
337 >         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));  
338 >
339 >    fill(expot_row.begin(), expot_row.end(),
340 >         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
341 >
342 >    fill(expot_col.begin(), expot_col.end(),
343 >         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));  
344 >
345 >    if (storageLayout_ & DataStorage::dslParticlePot) {    
346 >      fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(),
347 >           0.0);
348 >      fill(atomColData.particlePot.begin(), atomColData.particlePot.end(),
349 >           0.0);
350 >    }
351 >
352 >    if (storageLayout_ & DataStorage::dslDensity) {      
353 >      fill(atomRowData.density.begin(), atomRowData.density.end(), 0.0);
354 >      fill(atomColData.density.begin(), atomColData.density.end(), 0.0);
355 >    }
356 >
357 >    if (storageLayout_ & DataStorage::dslFunctional) {  
358 >      fill(atomRowData.functional.begin(), atomRowData.functional.end(),
359 >           0.0);
360 >      fill(atomColData.functional.begin(), atomColData.functional.end(),
361 >           0.0);
362 >    }
363 >
364 >    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {      
365 >      fill(atomRowData.functionalDerivative.begin(),
366 >           atomRowData.functionalDerivative.end(), 0.0);
367 >      fill(atomColData.functionalDerivative.begin(),
368 >           atomColData.functionalDerivative.end(), 0.0);
369 >    }
370 >
371 >    if (storageLayout_ & DataStorage::dslSkippedCharge) {      
372 >      fill(atomRowData.skippedCharge.begin(),
373 >           atomRowData.skippedCharge.end(), 0.0);
374 >      fill(atomColData.skippedCharge.begin(),
375 >           atomColData.skippedCharge.end(), 0.0);
376 >    }
377 >
378 >    if (storageLayout_ & DataStorage::dslFlucQForce) {      
379 >      fill(atomRowData.flucQFrc.begin(),
380 >           atomRowData.flucQFrc.end(), 0.0);
381 >      fill(atomColData.flucQFrc.begin(),
382 >           atomColData.flucQFrc.end(), 0.0);
383 >    }
384 >
385 >    if (storageLayout_ & DataStorage::dslElectricField) {    
386 >      fill(atomRowData.electricField.begin(),
387 >           atomRowData.electricField.end(), V3Zero);
388 >      fill(atomColData.electricField.begin(),
389 >           atomColData.electricField.end(), V3Zero);
390 >    }
391 >
392 >    if (storageLayout_ & DataStorage::dslSitePotential) {    
393 >      fill(atomRowData.sitePotential.begin(),
394 >           atomRowData.sitePotential.end(), 0.0);
395 >      fill(atomColData.sitePotential.begin(),
396 >           atomColData.sitePotential.end(), 0.0);
397 >    }
398 >
399 > #endif
400 >    // even in parallel, we need to zero out the local arrays:
401 >
402 >    if (storageLayout_ & DataStorage::dslParticlePot) {      
403 >      fill(snap_->atomData.particlePot.begin(),
404 >           snap_->atomData.particlePot.end(), 0.0);
405 >    }
406 >    
407 >    if (storageLayout_ & DataStorage::dslDensity) {      
408 >      fill(snap_->atomData.density.begin(),
409 >           snap_->atomData.density.end(), 0.0);
410 >    }
411 >
412 >    if (storageLayout_ & DataStorage::dslFunctional) {
413 >      fill(snap_->atomData.functional.begin(),
414 >           snap_->atomData.functional.end(), 0.0);
415 >    }
416 >
417 >    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {      
418 >      fill(snap_->atomData.functionalDerivative.begin(),
419 >           snap_->atomData.functionalDerivative.end(), 0.0);
420 >    }
421 >
422 >    if (storageLayout_ & DataStorage::dslSkippedCharge) {      
423 >      fill(snap_->atomData.skippedCharge.begin(),
424 >           snap_->atomData.skippedCharge.end(), 0.0);
425 >    }
426 >
427 >    if (storageLayout_ & DataStorage::dslElectricField) {      
428 >      fill(snap_->atomData.electricField.begin(),
429 >           snap_->atomData.electricField.end(), V3Zero);
430 >    }
431 >    if (storageLayout_ & DataStorage::dslSitePotential) {      
432 >      fill(snap_->atomData.sitePotential.begin(),
433 >           snap_->atomData.sitePotential.end(), 0.0);
434 >    }
435 >  }
436 >
437 >
438 >  void ForceMatrixDecomposition::distributeData()  {
439 >  
440 > #ifdef IS_MPI
441 >
442 >    snap_ = sman_->getCurrentSnapshot();
443 >    storageLayout_ = sman_->getStorageLayout();
444      
445 +    bool needsCG = true;
446 +    if(info_->getNCutoffGroups() != info_->getNAtoms())
447 +      needsCG = false;
448 +
449      // gather up the atomic positions
450 <    AtomCommVectorI->gather(snap->atomData.position,
451 <                            snap->atomIData.position);
452 <    AtomCommVectorJ->gather(snap->atomData.position,
453 <                            snap->atomJData.position);
450 >    AtomPlanVectorRow->gather(snap_->atomData.position,
451 >                              atomRowData.position);
452 >    AtomPlanVectorColumn->gather(snap_->atomData.position,
453 >                                 atomColData.position);
454      
455      // gather up the cutoff group positions
456 <    cgCommVectorI->gather(snap->cgData.position,
457 <                          snap->cgIData.position);
458 <    cgCommVectorJ->gather(snap->cgData.position,
459 <                          snap->cgJData.position);
456 >
457 >    if (needsCG) {
458 >      cgPlanVectorRow->gather(snap_->cgData.position,
459 >                              cgRowData.position);
460 >      
461 >      cgPlanVectorColumn->gather(snap_->cgData.position,
462 >                                 cgColData.position);
463 >    }
464 >
465 >
466 >    if (needVelocities_) {
467 >      // gather up the atomic velocities
468 >      AtomPlanVectorColumn->gather(snap_->atomData.velocity,
469 >                                   atomColData.velocity);
470 >
471 >      if (needsCG) {        
472 >        cgPlanVectorColumn->gather(snap_->cgData.velocity,
473 >                                   cgColData.velocity);
474 >      }
475 >    }
476 >
477      
478      // if needed, gather the atomic rotation matrices
479 <    if (snap->atomData.getStorageLayout() & DataStorage::dslAmat) {
480 <      AtomCommMatrixI->gather(snap->atomData.aMat,
481 <                              snap->atomIData.aMat);
482 <      AtomCommMatrixJ->gather(snap->atomData.aMat,
483 <                              snap->atomJData.aMat);
479 >    if (storageLayout_ & DataStorage::dslAmat) {
480 >      AtomPlanMatrixRow->gather(snap_->atomData.aMat,
481 >                                atomRowData.aMat);
482 >      AtomPlanMatrixColumn->gather(snap_->atomData.aMat,
483 >                                   atomColData.aMat);
484      }
485 <    
486 <    // if needed, gather the atomic electrostatic frames
487 <    if (snap->atomData.getStorageLayout() & DataStorage::dslElectroFrame) {
488 <      AtomCommMatrixI->gather(snap->atomData.electroFrame,
489 <                              snap->atomIData.electroFrame);
490 <      AtomCommMatrixJ->gather(snap->atomData.electroFrame,
491 <                              snap->atomJData.electroFrame);
485 >
486 >    // if needed, gather the atomic electrostatic information
487 >    if (storageLayout_ & DataStorage::dslDipole) {
488 >      AtomPlanVectorRow->gather(snap_->atomData.dipole,
489 >                                atomRowData.dipole);
490 >      AtomPlanVectorColumn->gather(snap_->atomData.dipole,
491 >                                   atomColData.dipole);
492      }
493 +
494 +    if (storageLayout_ & DataStorage::dslQuadrupole) {
495 +      AtomPlanMatrixRow->gather(snap_->atomData.quadrupole,
496 +                                atomRowData.quadrupole);
497 +      AtomPlanMatrixColumn->gather(snap_->atomData.quadrupole,
498 +                                   atomColData.quadrupole);
499 +    }
500 +        
501 +    // if needed, gather the atomic fluctuating charge values
502 +    if (storageLayout_ & DataStorage::dslFlucQPosition) {
503 +      AtomPlanRealRow->gather(snap_->atomData.flucQPos,
504 +                              atomRowData.flucQPos);
505 +      AtomPlanRealColumn->gather(snap_->atomData.flucQPos,
506 +                                 atomColData.flucQPos);
507 +    }
508 +
509   #endif      
510    }
511    
512 <  void ForceDecomposition::collectIntermediateData() {
512 >  /* collects information obtained during the pre-pair loop onto local
513 >   * data structures.
514 >   */
515 >  void ForceMatrixDecomposition::collectIntermediateData() {
516   #ifdef IS_MPI
149    Snapshot* snap = sman_->getCurrentSnapshot();
150    
151    if (snap->atomData.getStorageLayout() & DataStorage::dslDensity) {
517  
518 <      AtomCommRealI->scatter(snap->atomIData.density,
519 <                             snap->atomData.density);
518 >    snap_ = sman_->getCurrentSnapshot();
519 >    storageLayout_ = sman_->getStorageLayout();
520  
521 <      int n = snap->atomData.density.size();
522 <      std::vector<RealType> rho_tmp(n, 0.0);
523 <      AtomCommRealJ->scatter(snap->atomJData.density, rho_tmp);
521 >    if (storageLayout_ & DataStorage::dslDensity) {
522 >      
523 >      AtomPlanRealRow->scatter(atomRowData.density,
524 >                               snap_->atomData.density);
525 >      
526 >      int n = snap_->atomData.density.size();
527 >      vector<RealType> rho_tmp(n, 0.0);
528 >      AtomPlanRealColumn->scatter(atomColData.density, rho_tmp);
529        for (int i = 0; i < n; i++)
530 <        snap->atomData.density[i] += rho_tmp[i];
530 >        snap_->atomData.density[i] += rho_tmp[i];
531      }
532 +
533 +    // this isn't necessary if we don't have polarizable atoms, but
534 +    // we'll leave it here for now.
535 +    if (storageLayout_ & DataStorage::dslElectricField) {
536 +      
537 +      AtomPlanVectorRow->scatter(atomRowData.electricField,
538 +                                 snap_->atomData.electricField);
539 +      
540 +      int n = snap_->atomData.electricField.size();
541 +      vector<Vector3d> field_tmp(n, V3Zero);
542 +      AtomPlanVectorColumn->scatter(atomColData.electricField,
543 +                                    field_tmp);
544 +      for (int i = 0; i < n; i++)
545 +        snap_->atomData.electricField[i] += field_tmp[i];
546 +    }
547   #endif
548    }
549 <  
550 <  void ForceDecomposition::distributeIntermediateData() {
549 >
550 >  /*
551 >   * redistributes information obtained during the pre-pair loop out to
552 >   * row and column-indexed data structures
553 >   */
554 >  void ForceMatrixDecomposition::distributeIntermediateData() {
555   #ifdef IS_MPI
556 <    Snapshot* snap = sman_->getCurrentSnapshot();
557 <    if (snap->atomData.getStorageLayout() & DataStorage::dslFunctional) {
558 <      AtomCommRealI->gather(snap->atomData.functional,
559 <                            snap->atomIData.functional);
560 <      AtomCommRealJ->gather(snap->atomData.functional,
561 <                            snap->atomJData.functional);
556 >    snap_ = sman_->getCurrentSnapshot();
557 >    storageLayout_ = sman_->getStorageLayout();
558 >
559 >    if (storageLayout_ & DataStorage::dslFunctional) {
560 >      AtomPlanRealRow->gather(snap_->atomData.functional,
561 >                              atomRowData.functional);
562 >      AtomPlanRealColumn->gather(snap_->atomData.functional,
563 >                                 atomColData.functional);
564      }
565      
566 <    if (snap->atomData.getStorageLayout() & DataStorage::dslFunctionalDerivative) {
567 <      AtomCommRealI->gather(snap->atomData.functionalDerivative,
568 <                            snap->atomIData.functionalDerivative);
569 <      AtomCommRealJ->gather(snap->atomData.functionalDerivative,
570 <                            snap->atomJData.functionalDerivative);
566 >    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
567 >      AtomPlanRealRow->gather(snap_->atomData.functionalDerivative,
568 >                              atomRowData.functionalDerivative);
569 >      AtomPlanRealColumn->gather(snap_->atomData.functionalDerivative,
570 >                                 atomColData.functionalDerivative);
571      }
572   #endif
573    }
574    
575    
576 <  void ForceDecomposition::collectData() {
576 >  void ForceMatrixDecomposition::collectData() {
577   #ifdef IS_MPI
578 <    Snapshot* snap = sman_->getCurrentSnapshot();
579 <    
580 <    int n = snap->atomData.force.size();
578 >    snap_ = sman_->getCurrentSnapshot();
579 >    storageLayout_ = sman_->getStorageLayout();
580 >
581 >    int n = snap_->atomData.force.size();
582      vector<Vector3d> frc_tmp(n, V3Zero);
583      
584 <    AtomCommVectorI->scatter(snap->atomIData.force, frc_tmp);
584 >    AtomPlanVectorRow->scatter(atomRowData.force, frc_tmp);
585      for (int i = 0; i < n; i++) {
586 <      snap->atomData.force[i] += frc_tmp[i];
586 >      snap_->atomData.force[i] += frc_tmp[i];
587        frc_tmp[i] = 0.0;
588      }
589      
590 <    AtomCommVectorJ->scatter(snap->atomJData.force, frc_tmp);
591 <    for (int i = 0; i < n; i++)
592 <      snap->atomData.force[i] += frc_tmp[i];
593 <    
594 <    
595 <    if (snap->atomData.getStorageLayout() & DataStorage::dslTorque) {
590 >    AtomPlanVectorColumn->scatter(atomColData.force, frc_tmp);
591 >    for (int i = 0; i < n; i++) {
592 >      snap_->atomData.force[i] += frc_tmp[i];
593 >    }
594 >        
595 >    if (storageLayout_ & DataStorage::dslTorque) {
596  
597 <      int nt = snap->atomData.force.size();
597 >      int nt = snap_->atomData.torque.size();
598        vector<Vector3d> trq_tmp(nt, V3Zero);
599  
600 <      AtomCommVectorI->scatter(snap->atomIData.torque, trq_tmp);
601 <      for (int i = 0; i < n; i++) {
602 <        snap->atomData.torque[i] += trq_tmp[i];
600 >      AtomPlanVectorRow->scatter(atomRowData.torque, trq_tmp);
601 >      for (int i = 0; i < nt; i++) {
602 >        snap_->atomData.torque[i] += trq_tmp[i];
603          trq_tmp[i] = 0.0;
604        }
605        
606 <      AtomCommVectorJ->scatter(snap->atomJData.torque, trq_tmp);
607 <      for (int i = 0; i < n; i++)
608 <        snap->atomData.torque[i] += trq_tmp[i];
606 >      AtomPlanVectorColumn->scatter(atomColData.torque, trq_tmp);
607 >      for (int i = 0; i < nt; i++)
608 >        snap_->atomData.torque[i] += trq_tmp[i];
609      }
610 +
611 +    if (storageLayout_ & DataStorage::dslSkippedCharge) {
612 +
613 +      int ns = snap_->atomData.skippedCharge.size();
614 +      vector<RealType> skch_tmp(ns, 0.0);
615 +
616 +      AtomPlanRealRow->scatter(atomRowData.skippedCharge, skch_tmp);
617 +      for (int i = 0; i < ns; i++) {
618 +        snap_->atomData.skippedCharge[i] += skch_tmp[i];
619 +        skch_tmp[i] = 0.0;
620 +      }
621 +      
622 +      AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp);
623 +      for (int i = 0; i < ns; i++)
624 +        snap_->atomData.skippedCharge[i] += skch_tmp[i];
625 +            
626 +    }
627      
628 <    int nLocal = snap->getNumberOfAtoms();
628 >    if (storageLayout_ & DataStorage::dslFlucQForce) {
629  
630 <    vector<vector<RealType> > pot_temp(N_INTERACTION_FAMILIES,
631 <                                       vector<RealType> (nLocal, 0.0));
630 >      int nq = snap_->atomData.flucQFrc.size();
631 >      vector<RealType> fqfrc_tmp(nq, 0.0);
632 >
633 >      AtomPlanRealRow->scatter(atomRowData.flucQFrc, fqfrc_tmp);
634 >      for (int i = 0; i < nq; i++) {
635 >        snap_->atomData.flucQFrc[i] += fqfrc_tmp[i];
636 >        fqfrc_tmp[i] = 0.0;
637 >      }
638 >      
639 >      AtomPlanRealColumn->scatter(atomColData.flucQFrc, fqfrc_tmp);
640 >      for (int i = 0; i < nq; i++)
641 >        snap_->atomData.flucQFrc[i] += fqfrc_tmp[i];
642 >            
643 >    }
644 >
645 >    if (storageLayout_ & DataStorage::dslElectricField) {
646 >
647 >      int nef = snap_->atomData.electricField.size();
648 >      vector<Vector3d> efield_tmp(nef, V3Zero);
649 >
650 >      AtomPlanVectorRow->scatter(atomRowData.electricField, efield_tmp);
651 >      for (int i = 0; i < nef; i++) {
652 >        snap_->atomData.electricField[i] += efield_tmp[i];
653 >        efield_tmp[i] = 0.0;
654 >      }
655 >      
656 >      AtomPlanVectorColumn->scatter(atomColData.electricField, efield_tmp);
657 >      for (int i = 0; i < nef; i++)
658 >        snap_->atomData.electricField[i] += efield_tmp[i];
659 >    }
660 >
661 >    if (storageLayout_ & DataStorage::dslSitePotential) {
662 >
663 >      int nsp = snap_->atomData.sitePotential.size();
664 >      vector<RealType> sp_tmp(nsp, 0.0);
665 >
666 >      AtomPlanRealRow->scatter(atomRowData.sitePotential, sp_tmp);
667 >      for (int i = 0; i < nsp; i++) {
668 >        snap_->atomData.sitePotential[i] += sp_tmp[i];
669 >        sp_tmp[i] = 0.0;
670 >      }
671 >      
672 >      AtomPlanRealColumn->scatter(atomColData.sitePotential, sp_tmp);
673 >      for (int i = 0; i < nsp; i++)
674 >        snap_->atomData.sitePotential[i] += sp_tmp[i];
675 >    }
676 >
677 >    nLocal_ = snap_->getNumberOfAtoms();
678 >
679 >    vector<potVec> pot_temp(nLocal_,
680 >                            Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
681 >    vector<potVec> expot_temp(nLocal_,
682 >                              Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
683 >
684 >    // scatter/gather pot_row into the members of my column
685 >          
686 >    AtomPlanPotRow->scatter(pot_row, pot_temp);
687 >    AtomPlanPotRow->scatter(expot_row, expot_temp);
688 >
689 >    for (std::size_t ii = 0;  ii < pot_temp.size(); ii++ )
690 >      pairwisePot += pot_temp[ii];
691 >
692 >    for (std::size_t ii = 0;  ii < expot_temp.size(); ii++ )
693 >      excludedPot += expot_temp[ii];
694      
695 <    for (int i = 0; i < N_INTERACTION_FAMILIES; i++) {
696 <      AtomCommRealI->scatter(pot_row[i], pot_temp[i]);
697 <      for (int ii = 0;  ii < pot_temp[i].size(); ii++ ) {
698 <        pot_local[i] += pot_temp[i][ii];
695 >    if (storageLayout_ & DataStorage::dslParticlePot) {
696 >      // This is the pairwise contribution to the particle pot.  The
697 >      // embedding contribution is added in each of the low level
698 >      // non-bonded routines.  In single processor, this is done in
699 >      // unpackInteractionData, not in collectData.
700 >      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
701 >        for (int i = 0; i < nLocal_; i++) {
702 >          // factor of two is because the total potential terms are divided
703 >          // by 2 in parallel due to the row/column scatter
704 >          snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii);
705 >        }
706        }
707      }
708 +
709 +    fill(pot_temp.begin(), pot_temp.end(),
710 +         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
711 +    fill(expot_temp.begin(), expot_temp.end(),
712 +         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
713 +      
714 +    AtomPlanPotColumn->scatter(pot_col, pot_temp);    
715 +    AtomPlanPotColumn->scatter(expot_col, expot_temp);    
716 +    
717 +    for (std::size_t ii = 0;  ii < pot_temp.size(); ii++ )
718 +      pairwisePot += pot_temp[ii];    
719 +
720 +    for (std::size_t ii = 0;  ii < expot_temp.size(); ii++ )
721 +      excludedPot += expot_temp[ii];    
722 +    
723 +    if (storageLayout_ & DataStorage::dslParticlePot) {
724 +      // This is the pairwise contribution to the particle pot.  The
725 +      // embedding contribution is added in each of the low level
726 +      // non-bonded routines.  In single processor, this is done in
727 +      // unpackInteractionData, not in collectData.
728 +      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
729 +        for (int i = 0; i < nLocal_; i++) {
730 +          // factor of two is because the total potential terms are divided
731 +          // by 2 in parallel due to the row/column scatter
732 +          snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii);
733 +        }
734 +      }
735 +    }
736 +    
737 +    if (storageLayout_ & DataStorage::dslParticlePot) {
738 +      int npp = snap_->atomData.particlePot.size();
739 +      vector<RealType> ppot_temp(npp, 0.0);
740 +
741 +      // This is the direct or embedding contribution to the particle
742 +      // pot.
743 +      
744 +      AtomPlanRealRow->scatter(atomRowData.particlePot, ppot_temp);
745 +      for (int i = 0; i < npp; i++) {
746 +        snap_->atomData.particlePot[i] += ppot_temp[i];
747 +      }
748 +
749 +      fill(ppot_temp.begin(), ppot_temp.end(), 0.0);
750 +      
751 +      AtomPlanRealColumn->scatter(atomColData.particlePot, ppot_temp);
752 +      for (int i = 0; i < npp; i++) {
753 +        snap_->atomData.particlePot[i] += ppot_temp[i];
754 +      }
755 +    }
756 +
757 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
758 +      RealType ploc1 = pairwisePot[ii];
759 +      RealType ploc2 = 0.0;
760 +      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
761 +      pairwisePot[ii] = ploc2;
762 +    }
763 +
764 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
765 +      RealType ploc1 = excludedPot[ii];
766 +      RealType ploc2 = 0.0;
767 +      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
768 +      excludedPot[ii] = ploc2;
769 +    }
770 +
771 +    // Here be dragons.
772 +    MPI_Comm col = colComm.getComm();
773 +
774 +    MPI_Allreduce(MPI_IN_PLACE,
775 +                  &snap_->frameData.conductiveHeatFlux[0], 3,
776 +                  MPI_REALTYPE, MPI_SUM, col);
777 +
778 +
779   #endif
780 +
781    }
782 +
783 +  /**
784 +   * Collects information obtained during the post-pair (and embedding
785 +   * functional) loops onto local data structures.
786 +   */
787 +  void ForceMatrixDecomposition::collectSelfData() {
788 +
789 + #ifdef IS_MPI
790 +    snap_ = sman_->getCurrentSnapshot();
791 +    storageLayout_ = sman_->getStorageLayout();
792 +
793 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
794 +      RealType ploc1 = embeddingPot[ii];
795 +      RealType ploc2 = 0.0;
796 +      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
797 +      embeddingPot[ii] = ploc2;
798 +    }    
799 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
800 +      RealType ploc1 = excludedSelfPot[ii];
801 +      RealType ploc2 = 0.0;
802 +      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
803 +      excludedSelfPot[ii] = ploc2;
804 +    }    
805 + #endif
806 +    
807 +  }
808 +
809 +  int& ForceMatrixDecomposition::getNAtomsInRow() {  
810 + #ifdef IS_MPI
811 +    return nAtomsInRow_;
812 + #else
813 +    return nLocal_;
814 + #endif
815 +  }
816 +
817 +  /**
818 +   * returns the list of atoms belonging to this group.  
819 +   */
820 +  vector<int>& ForceMatrixDecomposition::getAtomsInGroupRow(int cg1){
821 + #ifdef IS_MPI
822 +    return groupListRow_[cg1];
823 + #else
824 +    return groupList_[cg1];
825 + #endif
826 +  }
827 +
828 +  vector<int>& ForceMatrixDecomposition::getAtomsInGroupColumn(int cg2){
829 + #ifdef IS_MPI
830 +    return groupListCol_[cg2];
831 + #else
832 +    return groupList_[cg2];
833 + #endif
834 +  }
835 +  
836 +  Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1,
837 +                                                         int cg2){
838 +
839 +    Vector3d d;
840 + #ifdef IS_MPI
841 +    d = cgColData.position[cg2] - cgRowData.position[cg1];
842 + #else
843 +    d = snap_->cgData.position[cg2] - snap_->cgData.position[cg1];
844 + #endif
845 +    
846 +    if (usePeriodicBoundaryConditions_) {
847 +      snap_->wrapVector(d);
848 +    }
849 +    return d;    
850 +  }
851 +
852 +  Vector3d& ForceMatrixDecomposition::getGroupVelocityColumn(int cg2){
853 + #ifdef IS_MPI
854 +    return cgColData.velocity[cg2];
855 + #else
856 +    return snap_->cgData.velocity[cg2];
857 + #endif
858 +  }
859 +
860 +  Vector3d& ForceMatrixDecomposition::getAtomVelocityColumn(int atom2){
861 + #ifdef IS_MPI
862 +    return atomColData.velocity[atom2];
863 + #else
864 +    return snap_->atomData.velocity[atom2];
865 + #endif
866 +  }
867 +
868 +
869 +  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1,
870 +                                                             int cg1) {
871 +    Vector3d d;
872 +    
873 + #ifdef IS_MPI
874 +    d = cgRowData.position[cg1] - atomRowData.position[atom1];
875 + #else
876 +    d = snap_->cgData.position[cg1] - snap_->atomData.position[atom1];
877 + #endif
878 +    if (usePeriodicBoundaryConditions_) {
879 +      snap_->wrapVector(d);
880 +    }
881 +    return d;    
882 +  }
883 +  
884 +  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2,
885 +                                                                int cg2) {
886 +    Vector3d d;
887 +    
888 + #ifdef IS_MPI
889 +    d = cgColData.position[cg2] - atomColData.position[atom2];
890 + #else
891 +    d = snap_->cgData.position[cg2] - snap_->atomData.position[atom2];
892 + #endif
893 +    if (usePeriodicBoundaryConditions_) {
894 +      snap_->wrapVector(d);
895 +    }
896 +    return d;    
897 +  }
898 +
899 +  RealType& ForceMatrixDecomposition::getMassFactorRow(int atom1) {
900 + #ifdef IS_MPI
901 +    return massFactorsRow[atom1];
902 + #else
903 +    return massFactors[atom1];
904 + #endif
905 +  }
906 +
907 +  RealType& ForceMatrixDecomposition::getMassFactorColumn(int atom2) {
908 + #ifdef IS_MPI
909 +    return massFactorsCol[atom2];
910 + #else
911 +    return massFactors[atom2];
912 + #endif
913 +
914 +  }
915 +    
916 +  Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1,
917 +                                                          int atom2){
918 +    Vector3d d;
919 +    
920 + #ifdef IS_MPI
921 +    d = atomColData.position[atom2] - atomRowData.position[atom1];
922 + #else
923 +    d = snap_->atomData.position[atom2] - snap_->atomData.position[atom1];
924 + #endif
925 +    if (usePeriodicBoundaryConditions_) {
926 +      snap_->wrapVector(d);
927 +    }
928 +    return d;    
929 +  }
930 +
931 +  vector<int>& ForceMatrixDecomposition::getExcludesForAtom(int atom1) {
932 +    return excludesForAtom[atom1];
933 +  }
934 +
935 +  /**
936 +   * We need to exclude some overcounted interactions that result from
937 +   * the parallel decomposition.
938 +   */
939 +  bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2,
940 +                                              int cg1, int cg2) {
941 +    int unique_id_1, unique_id_2;
942 +        
943 + #ifdef IS_MPI
944 +    // in MPI, we have to look up the unique IDs for each atom
945 +    unique_id_1 = AtomRowToGlobal[atom1];
946 +    unique_id_2 = AtomColToGlobal[atom2];
947 +    // group1 = cgRowToGlobal[cg1];
948 +    // group2 = cgColToGlobal[cg2];
949 + #else
950 +    unique_id_1 = AtomLocalToGlobal[atom1];
951 +    unique_id_2 = AtomLocalToGlobal[atom2];
952 +    int group1 = cgLocalToGlobal[cg1];
953 +    int group2 = cgLocalToGlobal[cg2];
954 + #endif  
955 +
956 +    if (unique_id_1 == unique_id_2) return true;
957 +
958 + #ifdef IS_MPI
959 +    // this prevents us from doing the pair on multiple processors
960 +    if (unique_id_1 < unique_id_2) {
961 +      if ((unique_id_1 + unique_id_2) % 2 == 0) return true;
962 +    } else {
963 +      if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
964 +    }
965 + #endif    
966 +
967 + #ifndef IS_MPI
968 +    if (group1 == group2) {
969 +      if (unique_id_1 < unique_id_2) return true;
970 +    }
971 + #endif
972 +    
973 +    return false;
974 +  }
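
In the MPI branch above, a given global pair typically shows up twice, once with atom 1 as the row atom and once with the roles swapped on another processor; the parity test lets exactly one of the two compute it. A standalone sketch of the same rule, with illustrative names (not OpenMD API):

    bool skipOnThisProcessor(int rowGlobalId, int colGlobalId) {
      if (rowGlobalId == colGlobalId) return true;            // self pair
      if (rowGlobalId < colGlobalId)
        return (rowGlobalId + colGlobalId) % 2 == 0;
      else
        return (rowGlobalId + colGlobalId) % 2 == 1;
    }

    // Example: for the pair (3, 7) the sum is even, so the processor that
    // sees 3 in its row block skips it (3 < 7, sum even), while the processor
    // that sees 7 in its row block computes it (7 > 3, sum not odd).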
975 +
976 +  /**
977 +   * We need to handle the interactions for atoms who are involved in
978 +   * the same rigid body as well as some short range interactions
979 +   * (bonds, bends, torsions) differently from other interactions.
980 +   * We'll still visit the pairwise routines, but with a flag that
981 +   * tells those routines to exclude the pair from direct long range
982 +   * interactions.  Some indirect interactions (notably reaction
983 +   * field) must still be handled for these pairs.
984 +   */
985 +  bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) {
986 +
987 +    // excludesForAtom was constructed to use row/column indices in the MPI
988 +    // version, and to use local IDs in the non-MPI version:
989 +    
990 +    for (vector<int>::iterator i = excludesForAtom[atom1].begin();
991 +         i != excludesForAtom[atom1].end(); ++i) {
992 +      if ( (*i) == atom2 ) return true;
993 +    }
994 +
995 +    return false;
996 +  }
997 +
998 +
999 +  void ForceMatrixDecomposition::addForceToAtomRow(int atom1, Vector3d fg){
1000 + #ifdef IS_MPI
1001 +    atomRowData.force[atom1] += fg;
1002 + #else
1003 +    snap_->atomData.force[atom1] += fg;
1004 + #endif
1005 +  }
1006 +
1007 +  void ForceMatrixDecomposition::addForceToAtomColumn(int atom2, Vector3d fg){
1008 + #ifdef IS_MPI
1009 +    atomColData.force[atom2] += fg;
1010 + #else
1011 +    snap_->atomData.force[atom2] += fg;
1012 + #endif
1013 +  }
1014 +
1015 +  // filling interaction blocks with pointers
1016 +  void ForceMatrixDecomposition::fillInteractionData(InteractionData &idat,
1017 +                                                     int atom1, int atom2,
1018 +                                                     bool newAtom1) {
1019 +
1020 +    idat.excluded = excludeAtomPair(atom1, atom2);
1021 +
1022 +    if (newAtom1) {
1023 +      
1024 + #ifdef IS_MPI
1025 +      idat.atid1 = identsRow[atom1];
1026 +      idat.atid2 = identsCol[atom2];
1027 +      
1028 +      if (regionsRow[atom1] >= 0 && regionsCol[atom2] >= 0) {
1029 +        idat.sameRegion = (regionsRow[atom1] == regionsCol[atom2]);
1030 +      } else {
1031 +        idat.sameRegion = false;
1032 +      }
1033 +      
1034 +      if (storageLayout_ & DataStorage::dslAmat) {
1035 +        idat.A1 = &(atomRowData.aMat[atom1]);
1036 +        idat.A2 = &(atomColData.aMat[atom2]);
1037 +      }
1038 +      
1039 +      if (storageLayout_ & DataStorage::dslTorque) {
1040 +        idat.t1 = &(atomRowData.torque[atom1]);
1041 +        idat.t2 = &(atomColData.torque[atom2]);
1042 +      }
1043 +      
1044 +      if (storageLayout_ & DataStorage::dslDipole) {
1045 +        idat.dipole1 = &(atomRowData.dipole[atom1]);
1046 +        idat.dipole2 = &(atomColData.dipole[atom2]);
1047 +      }
1048 +      
1049 +      if (storageLayout_ & DataStorage::dslQuadrupole) {
1050 +        idat.quadrupole1 = &(atomRowData.quadrupole[atom1]);
1051 +        idat.quadrupole2 = &(atomColData.quadrupole[atom2]);
1052 +      }
1053 +      
1054 +      if (storageLayout_ & DataStorage::dslDensity) {
1055 +        idat.rho1 = &(atomRowData.density[atom1]);
1056 +        idat.rho2 = &(atomColData.density[atom2]);
1057 +      }
1058 +      
1059 +      if (storageLayout_ & DataStorage::dslFunctional) {
1060 +        idat.frho1 = &(atomRowData.functional[atom1]);
1061 +        idat.frho2 = &(atomColData.functional[atom2]);
1062 +      }
1063 +      
1064 +      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1065 +        idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
1066 +        idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
1067 +      }
1068 +      
1069 +      if (storageLayout_ & DataStorage::dslParticlePot) {
1070 +        idat.particlePot1 = &(atomRowData.particlePot[atom1]);
1071 +        idat.particlePot2 = &(atomColData.particlePot[atom2]);
1072 +      }
1073 +      
1074 +      if (storageLayout_ & DataStorage::dslSkippedCharge) {              
1075 +        idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
1076 +        idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
1077 +      }
1078 +      
1079 +      if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1080 +        idat.flucQ1 = &(atomRowData.flucQPos[atom1]);
1081 +        idat.flucQ2 = &(atomColData.flucQPos[atom2]);
1082 +      }
1083 +      
1084 + #else
1085 +      
1086 +      idat.atid1 = idents[atom1];
1087 +      idat.atid2 = idents[atom2];
1088 +      
1089 +      if (regions[atom1] >= 0 && regions[atom2] >= 0) {
1090 +        idat.sameRegion = (regions[atom1] == regions[atom2]);
1091 +      } else {
1092 +        idat.sameRegion = false;
1093 +      }
1094 +      
1095 +      if (storageLayout_ & DataStorage::dslAmat) {
1096 +        idat.A1 = &(snap_->atomData.aMat[atom1]);
1097 +        idat.A2 = &(snap_->atomData.aMat[atom2]);
1098 +      }
1099 +      
1100 +      if (storageLayout_ & DataStorage::dslTorque) {
1101 +        idat.t1 = &(snap_->atomData.torque[atom1]);
1102 +        idat.t2 = &(snap_->atomData.torque[atom2]);
1103 +      }
1104 +      
1105 +      if (storageLayout_ & DataStorage::dslDipole) {
1106 +        idat.dipole1 = &(snap_->atomData.dipole[atom1]);
1107 +        idat.dipole2 = &(snap_->atomData.dipole[atom2]);
1108 +      }
1109 +      
1110 +      if (storageLayout_ & DataStorage::dslQuadrupole) {
1111 +        idat.quadrupole1 = &(snap_->atomData.quadrupole[atom1]);
1112 +        idat.quadrupole2 = &(snap_->atomData.quadrupole[atom2]);
1113 +      }
1114 +      
1115 +      if (storageLayout_ & DataStorage::dslDensity) {    
1116 +        idat.rho1 = &(snap_->atomData.density[atom1]);
1117 +        idat.rho2 = &(snap_->atomData.density[atom2]);
1118 +      }
1119 +      
1120 +      if (storageLayout_ & DataStorage::dslFunctional) {
1121 +        idat.frho1 = &(snap_->atomData.functional[atom1]);
1122 +        idat.frho2 = &(snap_->atomData.functional[atom2]);
1123 +      }
1124 +      
1125 +      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1126 +        idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
1127 +        idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
1128 +      }
1129 +      
1130 +      if (storageLayout_ & DataStorage::dslParticlePot) {
1131 +        idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
1132 +        idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
1133 +      }
1134 +      
1135 +      if (storageLayout_ & DataStorage::dslSkippedCharge) {
1136 +        idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
1137 +        idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
1138 +      }
1139 +      
1140 +      if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1141 +        idat.flucQ1 = &(snap_->atomData.flucQPos[atom1]);
1142 +        idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]);
1143 +      }
1144 + #endif
1145 +      
1146 +    } else {
1147 +      // atom1 is not new, so don't bother updating properties of that atom:
1148 + #ifdef IS_MPI
1149 +    idat.atid2 = identsCol[atom2];
1150 +
1151 +    if (regionsRow[atom1] >= 0 && regionsCol[atom2] >= 0) {
1152 +      idat.sameRegion = (regionsRow[atom1] == regionsCol[atom2]);
1153 +    } else {
1154 +      idat.sameRegion = false;
1155 +    }
1156 +
1157 +    if (storageLayout_ & DataStorage::dslAmat) {
1158 +      idat.A2 = &(atomColData.aMat[atom2]);
1159 +    }
1160 +    
1161 +    if (storageLayout_ & DataStorage::dslTorque) {
1162 +      idat.t2 = &(atomColData.torque[atom2]);
1163 +    }
1164 +
1165 +    if (storageLayout_ & DataStorage::dslDipole) {
1166 +      idat.dipole2 = &(atomColData.dipole[atom2]);
1167 +    }
1168 +
1169 +    if (storageLayout_ & DataStorage::dslQuadrupole) {
1170 +      idat.quadrupole2 = &(atomColData.quadrupole[atom2]);
1171 +    }
1172 +
1173 +    if (storageLayout_ & DataStorage::dslDensity) {
1174 +      idat.rho2 = &(atomColData.density[atom2]);
1175 +    }
1176 +
1177 +    if (storageLayout_ & DataStorage::dslFunctional) {
1178 +      idat.frho2 = &(atomColData.functional[atom2]);
1179 +    }
1180 +
1181 +    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1182 +      idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
1183 +    }
1184 +
1185 +    if (storageLayout_ & DataStorage::dslParticlePot) {
1186 +      idat.particlePot2 = &(atomColData.particlePot[atom2]);
1187 +    }
1188 +
1189 +    if (storageLayout_ & DataStorage::dslSkippedCharge) {              
1190 +      idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
1191 +    }
1192 +
1193 +    if (storageLayout_ & DataStorage::dslFlucQPosition) {
1194 +      idat.flucQ2 = &(atomColData.flucQPos[atom2]);
1195 +    }
1196 +
1197 + #else  
1198 +    idat.atid2 = idents[atom2];
1199 +
1200 +    if (regions[atom1] >= 0 && regions[atom2] >= 0) {
1201 +      idat.sameRegion = (regions[atom1] == regions[atom2]);
1202 +    } else {
1203 +      idat.sameRegion = false;
1204 +    }
1205 +
1206 +    if (storageLayout_ & DataStorage::dslAmat) {
1207 +      idat.A2 = &(snap_->atomData.aMat[atom2]);
1208 +    }
1209 +
1210 +    if (storageLayout_ & DataStorage::dslTorque) {
1211 +      idat.t2 = &(snap_->atomData.torque[atom2]);
1212 +    }
1213 +
1214 +    if (storageLayout_ & DataStorage::dslDipole) {
1215 +      idat.dipole2 = &(snap_->atomData.dipole[atom2]);
1216 +    }
1217 +
1218 +    if (storageLayout_ & DataStorage::dslQuadrupole) {
1219 +      idat.quadrupole2 = &(snap_->atomData.quadrupole[atom2]);
1220 +    }
1221 +
1222 +    if (storageLayout_ & DataStorage::dslDensity) {    
1223 +      idat.rho2 = &(snap_->atomData.density[atom2]);
1224 +    }
1225 +
1226 +    if (storageLayout_ & DataStorage::dslFunctional) {
1227 +      idat.frho2 = &(snap_->atomData.functional[atom2]);
1228 +    }
1229 +
1230 +    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1231 +      idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
1232 +    }
1233 +
1234 +    if (storageLayout_ & DataStorage::dslParticlePot) {
1235 +      idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
1236 +    }
1237 +
1238 +    if (storageLayout_ & DataStorage::dslSkippedCharge) {
1239 +      idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
1240 +    }
1241 +
1242 +    if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1243 +      idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]);
1244 +    }
1245 +
1246 + #endif
1247 +    }
1248 +  }
1249    
1250 +  void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat,
1251 +                                                       int atom1, int atom2) {  
1252 + #ifdef IS_MPI
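    // Each pairwise contribution is split evenly between the row copy of
    // atom1 and the column copy of atom2; collectData later scatters these
    // halves back to the local atoms and (for particlePot) multiplies by two
    // to recover the full per-atom value.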
1253 +    pot_row[atom1] += RealType(0.5) *  *(idat.pot);
1254 +    pot_col[atom2] += RealType(0.5) *  *(idat.pot);
1255 +    expot_row[atom1] += RealType(0.5) *  *(idat.excludedPot);
1256 +    expot_col[atom2] += RealType(0.5) *  *(idat.excludedPot);
1257 +
1258 +    atomRowData.force[atom1] += *(idat.f1);
1259 +    atomColData.force[atom2] -= *(idat.f1);
1260 +
1261 +    if (storageLayout_ & DataStorage::dslFlucQForce) {              
1262 +      atomRowData.flucQFrc[atom1] -= *(idat.dVdFQ1);
1263 +      atomColData.flucQFrc[atom2] -= *(idat.dVdFQ2);
1264 +    }
1265 +
1266 +    if (storageLayout_ & DataStorage::dslElectricField) {              
1267 +      atomRowData.electricField[atom1] += *(idat.eField1);
1268 +      atomColData.electricField[atom2] += *(idat.eField2);
1269 +    }
1270 +
1271 +    if (storageLayout_ & DataStorage::dslSitePotential) {              
1272 +      atomRowData.sitePotential[atom1] += *(idat.sPot1);
1273 +      atomColData.sitePotential[atom2] += *(idat.sPot2);
1274 +    }
1275 +
1276 + #else
1277 +    pairwisePot += *(idat.pot);
1278 +    excludedPot += *(idat.excludedPot);
1279 +
1280 +    snap_->atomData.force[atom1] += *(idat.f1);
1281 +    snap_->atomData.force[atom2] -= *(idat.f1);
1282 +
1283 +    if (idat.doParticlePot) {
1284 +      // This is the pairwise contribution to the particle pot.  The
1285 +      // embedding contribution is added in each of the low level
1286 +      // non-bonded routines.  In parallel, this calculation is done
1287 +      // in collectData, not in unpackInteractionData.
1288 +      snap_->atomData.particlePot[atom1] += *(idat.vpair) * *(idat.sw);
1289 +      snap_->atomData.particlePot[atom2] += *(idat.vpair) * *(idat.sw);
1290 +    }
1291 +    
1292 +    if (storageLayout_ & DataStorage::dslFlucQForce) {              
1293 +      snap_->atomData.flucQFrc[atom1] -= *(idat.dVdFQ1);
1294 +      snap_->atomData.flucQFrc[atom2] -= *(idat.dVdFQ2);
1295 +    }
1296 +
1297 +    if (storageLayout_ & DataStorage::dslElectricField) {              
1298 +      snap_->atomData.electricField[atom1] += *(idat.eField1);
1299 +      snap_->atomData.electricField[atom2] += *(idat.eField2);
1300 +    }
1301 +
1302 +    if (storageLayout_ & DataStorage::dslSitePotential) {              
1303 +      snap_->atomData.sitePotential[atom1] += *(idat.sPot1);
1304 +      snap_->atomData.sitePotential[atom2] += *(idat.sPot2);
1305 +    }
1306 +
1307 + #endif
1308 +    
1309 +  }
1310 +
1311 +  /*
1312 +   * buildNeighborList
1313 +   *
1314 +   * Constructs the Verlet neighbor list for a force-matrix
1315 +   * decomposition.  In this case, each processor is responsible for
1316 +   * row-site interactions with column-sites.
1317 +   *
1318 +   * neighborList is returned as a packed array of neighboring
1319 +   * column-ordered CutoffGroups.  The starting position in
1320 +   * neighborList for each row-ordered CutoffGroup is given by the
1321 +   * returned vector point.
1322 +   */
1323 +  void ForceMatrixDecomposition::buildNeighborList(vector<int>& neighborList,
1324 +                                                   vector<int>& point) {
1325 +    neighborList.clear();
1326 +    point.clear();
1327 +    int len = 0;
1328 +    
1329 +    bool doAllPairs = false;
1330 +
1331 +    Snapshot* snap_ = sman_->getCurrentSnapshot();
1332 +    Mat3x3d box;
1333 +    Mat3x3d invBox;
1334 +
1335 +    Vector3d rs, scaled, dr;
1336 +    Vector3i whichCell;
1337 +    int cellIndex;
1338 +
1339 + #ifdef IS_MPI
1340 +    cellListRow_.clear();
1341 +    cellListCol_.clear();
1342 +    point.resize(nGroupsInRow_+1);
1343 + #else
1344 +    cellList_.clear();
1345 +    point.resize(nGroups_+1);
1346 + #endif
1347 +    
1348 +    if (!usePeriodicBoundaryConditions_) {
1349 +      box = snap_->getBoundingBox();
1350 +      invBox = snap_->getInvBoundingBox();
1351 +    } else {
1352 +      box = snap_->getHmat();
1353 +      invBox = snap_->getInvHmat();
1354 +    }
1355 +    
1356 +    Vector3d A = box.getColumn(0);
1357 +    Vector3d B = box.getColumn(1);
1358 +    Vector3d C = box.getColumn(2);
1359 +
1360 +    // Required for triclinic cells
1361 +    Vector3d AxB = cross(A, B);
1362 +    Vector3d BxC = cross(B, C);
1363 +    Vector3d CxA = cross(C, A);
1364 +
1365 +    // unit vectors perpendicular to the faces of the triclinic cell:
1366 +    AxB.normalize();
1367 +    BxC.normalize();
1368 +    CxA.normalize();
1369 +
1370 +    // A set of perpendicular lengths in triclinic cells:
1371 +    RealType Wa = abs(dot(A, BxC));
1372 +    RealType Wb = abs(dot(B, CxA));
1373 +    RealType Wc = abs(dot(C, AxB));
1374 +    
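    // Dividing each perpendicular width by the list radius gives the
    // number of cells along that axis; every cell is then at least
    // rList_ wide, so any neighbor within rList_ of a group sits in the
    // group's own cell or in one of the 26 surrounding cells.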
1375 +    nCells_.x() = int( Wa / rList_ );
1376 +    nCells_.y() = int( Wb / rList_ );
1377 +    nCells_.z() = int( Wc / rList_ );
1378 +    
1379 +    // handle small boxes where the cell offsets can end up repeating cells
1380 +    if (nCells_.x() < 3) doAllPairs = true;
1381 +    if (nCells_.y() < 3) doAllPairs = true;
1382 +    if (nCells_.z() < 3) doAllPairs = true;
1383 +    
1384 +    int nCtot = nCells_.x() * nCells_.y() * nCells_.z();
1385 +    
1386 + #ifdef IS_MPI
1387 +    cellListRow_.resize(nCtot);
1388 +    cellListCol_.resize(nCtot);
1389 + #else
1390 +    cellList_.resize(nCtot);
1391 + #endif
1392 +    
1393 +    if (!doAllPairs) {
1394 +      
1395 + #ifdef IS_MPI
1396 +      
1397 +      for (int i = 0; i < nGroupsInRow_; i++) {
1398 +        rs = cgRowData.position[i];
1399 +        
1400 +        // scaled positions relative to the box vectors
1401 +        scaled = invBox * rs;
1402 +        
1403 +        // wrap the vector back into the unit box by subtracting integer box
1404 +        // numbers
1405 +        for (int j = 0; j < 3; j++) {
1406 +          scaled[j] -= roundMe(scaled[j]);
1407 +          scaled[j] += 0.5;
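          // roundMe() rounds to the nearest integer, so the subtraction
          // leaves scaled[j] in [-0.5, 0.5] and the +0.5 shift maps it
          // into [0, 1].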
1408 +          // Handle the special case when an object is exactly on the
1409 +          // boundary (a scaled coordinate of 1.0 is the same as
 1410 +          // a scaled coordinate of 0.0)
1411 +          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1412 +        }
1413 +        
1414 +        // find xyz-indices of cell that cutoffGroup is in.
 1415 +        whichCell.x() = int(nCells_.x() * scaled.x());
 1416 +        whichCell.y() = int(nCells_.y() * scaled.y());
 1417 +        whichCell.z() = int(nCells_.z() * scaled.z());
1418 +        
1419 +        // find single index of this cell:
1420 +        cellIndex = Vlinear(whichCell, nCells_);
1421 +        
1422 +        // add this cutoff group to the list of groups in this cell;
1423 +        cellListRow_[cellIndex].push_back(i);
1424 +      }
1425 +      for (int i = 0; i < nGroupsInCol_; i++) {
1426 +        rs = cgColData.position[i];
1427 +        
1428 +        // scaled positions relative to the box vectors
1429 +        scaled = invBox * rs;
1430 +        
1431 +        // wrap the vector back into the unit box by subtracting integer box
1432 +        // numbers
1433 +        for (int j = 0; j < 3; j++) {
1434 +          scaled[j] -= roundMe(scaled[j]);
1435 +          scaled[j] += 0.5;
1436 +          // Handle the special case when an object is exactly on the
1437 +          // boundary (a scaled coordinate of 1.0 is the same as
 1438 +          // a scaled coordinate of 0.0)
1439 +          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1440 +        }
1441 +        
1442 +        // find xyz-indices of cell that cutoffGroup is in.
 1443 +        whichCell.x() = int(nCells_.x() * scaled.x());
 1444 +        whichCell.y() = int(nCells_.y() * scaled.y());
 1445 +        whichCell.z() = int(nCells_.z() * scaled.z());
1446 +        
1447 +        // find single index of this cell:
1448 +        cellIndex = Vlinear(whichCell, nCells_);
1449 +        
1450 +        // add this cutoff group to the list of groups in this cell;
1451 +        cellListCol_[cellIndex].push_back(i);
1452 +      }
1453 +            
1454 + #else
1455 +      for (int i = 0; i < nGroups_; i++) {
1456 +        rs = snap_->cgData.position[i];
1457 +        
1458 +        // scaled positions relative to the box vectors
1459 +        scaled = invBox * rs;
1460 +        
1461 +        // wrap the vector back into the unit box by subtracting integer box
1462 +        // numbers
1463 +        for (int j = 0; j < 3; j++) {
1464 +          scaled[j] -= roundMe(scaled[j]);
1465 +          scaled[j] += 0.5;
1466 +          // Handle the special case when an object is exactly on the
1467 +          // boundary (a scaled coordinate of 1.0 is the same as
 1468 +          // a scaled coordinate of 0.0)
1469 +          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1470 +        }
1471 +        
1472 +        // find xyz-indices of cell that cutoffGroup is in.
1473 +        whichCell.x() = int(nCells_.x() * scaled.x());
1474 +        whichCell.y() = int(nCells_.y() * scaled.y());
1475 +        whichCell.z() = int(nCells_.z() * scaled.z());
1476 +        
1477 +        // find single index of this cell:
1478 +        cellIndex = Vlinear(whichCell, nCells_);
1479 +        
1480 +        // add this cutoff group to the list of groups in this cell;
1481 +        cellList_[cellIndex].push_back(i);
1482 +      }
1483 +
1484 + #endif
1485 +
1486 + #ifdef IS_MPI
1487 +      for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1488 +        rs = cgRowData.position[j1];
1489 + #else
1490 +
1491 +      for (int j1 = 0; j1 < nGroups_; j1++) {
1492 +        rs = snap_->cgData.position[j1];
1493 + #endif
 1494 +        point[j1] = len;  // group j1's neighbors start at this offset
1495 +        
1496 +        // scaled positions relative to the box vectors
1497 +        scaled = invBox * rs;
1498 +        
1499 +        // wrap the vector back into the unit box by subtracting integer box
1500 +        // numbers
1501 +        for (int j = 0; j < 3; j++) {
1502 +          scaled[j] -= roundMe(scaled[j]);
1503 +          scaled[j] += 0.5;
1504 +          // Handle the special case when an object is exactly on the
1505 +          // boundary (a scaled coordinate of 1.0 is the same as
 1506 +          // a scaled coordinate of 0.0)
1507 +          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1508 +        }
1509 +        
1510 +        // find xyz-indices of cell that cutoffGroup is in.
 1511 +        whichCell.x() = int(nCells_.x() * scaled.x());
 1512 +        whichCell.y() = int(nCells_.y() * scaled.y());
 1513 +        whichCell.z() = int(nCells_.z() * scaled.z());
1514 +        
1515 +        for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1516 +             os != cellOffsets_.end(); ++os) {
1517 +              
1518 +          Vector3i m2v = whichCell + (*os);
1519 +
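          // Periodically wrap the cell index: an offset that steps past
          // either edge of the grid maps back to the cell on the
          // opposite face.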
1520 +          if (m2v.x() >= nCells_.x()) {
1521 +            m2v.x() = 0;          
1522 +          } else if (m2v.x() < 0) {
1523 +            m2v.x() = nCells_.x() - 1;
1524 +          }
1525 +          
1526 +          if (m2v.y() >= nCells_.y()) {
1527 +            m2v.y() = 0;          
1528 +          } else if (m2v.y() < 0) {
1529 +            m2v.y() = nCells_.y() - 1;
1530 +          }
1531 +          
1532 +          if (m2v.z() >= nCells_.z()) {
1533 +            m2v.z() = 0;          
1534 +          } else if (m2v.z() < 0) {
1535 +            m2v.z() = nCells_.z() - 1;
1536 +          }
1537 +          int m2 = Vlinear (m2v, nCells_);                                      
1538 + #ifdef IS_MPI
1539 +          for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1540 +               j2 != cellListCol_[m2].end(); ++j2) {
1541 +            
1542 +            // In parallel, we need to visit *all* pairs of row
 1543 +            // & column indices and will divide labor in the
1544 +            // force evaluation later.
1545 +            dr = cgColData.position[(*j2)] - rs;
1546 +            if (usePeriodicBoundaryConditions_) {
1547 +              snap_->wrapVector(dr);
1548 +            }
1549 +            if (dr.lengthSquare() < rListSq_) {
1550 +              neighborList.push_back( (*j2) );
1551 +              ++len;
1552 +            }                
1553 +          }        
1554 + #else
1555 +          for (vector<int>::iterator j2 = cellList_[m2].begin();
1556 +               j2 != cellList_[m2].end(); ++j2) {
1557 +          
1558 +            // Always do this if we're in different cells or if
1559 +            // we're in the same cell and the global index of
1560 +            // the j2 cutoff group is greater than or equal to
 1561 +            // the j1 cutoff group.  Note that Rapaport's code
1562 +            // has a "less than" conditional here, but that
1563 +            // deals with atom-by-atom computation.  OpenMD
1564 +            // allows atoms within a single cutoff group to
1565 +            // interact with each other.
1566 +            
1567 +            if ( (*j2) >= j1 ) {
1568 +              
1569 +              dr = snap_->cgData.position[(*j2)] - rs;
1570 +              if (usePeriodicBoundaryConditions_) {
1571 +                snap_->wrapVector(dr);
1572 +              }
1573 +              if ( dr.lengthSquare() < rListSq_) {
1574 +                neighborList.push_back( (*j2) );
1575 +                ++len;
1576 +              }
1577 +            }
1578 +          }                
1579 + #endif
1580 +        }
1581 +      }      
1582 +    } else {
1583 +      // branch to do all cutoff group pairs
1584 + #ifdef IS_MPI
1585 +      for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1586 +        point[j1] = len;
1587 +        rs = cgRowData.position[j1];
1588 +        for (int j2 = 0; j2 < nGroupsInCol_; j2++) {    
1589 +          dr = cgColData.position[j2] - rs;
1590 +          if (usePeriodicBoundaryConditions_) {
1591 +            snap_->wrapVector(dr);
1592 +          }
1593 +          if (dr.lengthSquare() < rListSq_) {
1594 +            neighborList.push_back( j2 );
1595 +            ++len;
1596 +          }
1597 +        }
1598 +      }      
1599 + #else
1600 +      // include all groups here.
1601 +      for (int j1 = 0; j1 < nGroups_; j1++) {
1602 +        point[j1] = len;
1603 +        rs = snap_->cgData.position[j1];
1604 +        // include self group interactions j2 == j1
1605 +        for (int j2 = j1; j2 < nGroups_; j2++) {
1606 +          dr = snap_->cgData.position[j2] - rs;
1607 +          if (usePeriodicBoundaryConditions_) {
1608 +            snap_->wrapVector(dr);
1609 +          }
1610 +          if (dr.lengthSquare() < rListSq_) {
1611 +            neighborList.push_back( j2 );
1612 +            ++len;
1613 +          }
1614 +        }    
1615 +      }
1616 + #endif
1617 +    }
1618 +
1619 + #ifdef IS_MPI
1620 +    point[nGroupsInRow_] = len;
1621 + #else
1622 +    point[nGroups_] = len;
1623 + #endif
1624 +  
1625 +    // save the local cutoff group positions for the check that is
1626 +    // done on each loop:
1627 +    saved_CG_positions_.clear();
1628 +    saved_CG_positions_.reserve(nGroups_);
1629 +    for (int i = 0; i < nGroups_; i++)
1630 +      saved_CG_positions_.push_back(snap_->cgData.position[i]);
1631 +  }
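Consumers of the packed list walk point as row offsets into neighborList: entries point[i] through point[i+1]-1 hold the neighbors of row-ordered group i. A minimal traversal sketch under that assumption; forEachNeighborPair and the printf are illustrative only, not OpenMD code:

  #include <cstdio>
  #include <vector>

  // Walk a packed (CSR-style) neighbor list: for each row group i, the
  // slots point[i] .. point[i+1]-1 of neighborList name its neighbors.
  static void forEachNeighborPair(const std::vector<int>& neighborList,
                                  const std::vector<int>& point) {
    int nRowGroups = static_cast<int>(point.size()) - 1;
    for (int i = 0; i < nRowGroups; ++i) {
      for (int k = point[i]; k < point[i + 1]; ++k) {
        int j = neighborList[k];   // column-ordered (or local) neighbor index
        std::printf("group pair (%d, %d)\n", i, j);
      }
    }
  }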
1632   } //end namespace OpenMD
