root/OpenMD/trunk/src/parallel/ForceMatrixDecomposition.cpp

Comparing:
branches/development/src/parallel/ForceMatrixDecomposition.cpp (file contents), Revision 1575 by gezelter, Fri Jun 3 21:39:49 2011 UTC vs.
trunk/src/parallel/ForceMatrixDecomposition.cpp (file contents), Revision 2061 by gezelter, Tue Mar 3 16:24:44 2015 UTC

# Line 35 | Line 35
35   *                                                                      
36   * [1]  Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).            
37   * [2]  Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).          
38 < * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).          
39 < * [4]  Vardeman & Gezelter, in progress (2009).                        
38 > * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008).          
39 > * [4]  Kuang & Gezelter,  J. Chem. Phys. 133, 164101 (2010).
40 > * [5]  Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
41   */
42   #include "parallel/ForceMatrixDecomposition.hpp"
43   #include "math/SquareMatrix3.hpp"
# Line 47 | Line 48 | namespace OpenMD {
48   using namespace std;
49   namespace OpenMD {
50  
51 +  ForceMatrixDecomposition::ForceMatrixDecomposition(SimInfo* info, InteractionManager* iMan) : ForceDecomposition(info, iMan) {
52 +
53 +    // Row and column scans must visit all surrounding cells
54 +    cellOffsets_.clear();
55 +    cellOffsets_.push_back( Vector3i(-1,-1,-1) );
56 +    cellOffsets_.push_back( Vector3i( 0,-1,-1) );
57 +    cellOffsets_.push_back( Vector3i( 1,-1,-1) );                          
58 +    cellOffsets_.push_back( Vector3i(-1, 0,-1) );
59 +    cellOffsets_.push_back( Vector3i( 0, 0,-1) );
60 +    cellOffsets_.push_back( Vector3i( 1, 0,-1) );
61 +    cellOffsets_.push_back( Vector3i(-1, 1,-1) );
62 +    cellOffsets_.push_back( Vector3i( 0, 1,-1) );      
63 +    cellOffsets_.push_back( Vector3i( 1, 1,-1) );
64 +    cellOffsets_.push_back( Vector3i(-1,-1, 0) );
65 +    cellOffsets_.push_back( Vector3i( 0,-1, 0) );
66 +    cellOffsets_.push_back( Vector3i( 1,-1, 0) );
67 +    cellOffsets_.push_back( Vector3i(-1, 0, 0) );      
68 +    cellOffsets_.push_back( Vector3i( 0, 0, 0) );
69 +    cellOffsets_.push_back( Vector3i( 1, 0, 0) );
70 +    cellOffsets_.push_back( Vector3i(-1, 1, 0) );
71 +    cellOffsets_.push_back( Vector3i( 0, 1, 0) );
72 +    cellOffsets_.push_back( Vector3i( 1, 1, 0) );
73 +    cellOffsets_.push_back( Vector3i(-1,-1, 1) );
74 +    cellOffsets_.push_back( Vector3i( 0,-1, 1) );
75 +    cellOffsets_.push_back( Vector3i( 1,-1, 1) );
76 +    cellOffsets_.push_back( Vector3i(-1, 0, 1) );
77 +    cellOffsets_.push_back( Vector3i( 0, 0, 1) );
78 +    cellOffsets_.push_back( Vector3i( 1, 0, 1) );
79 +    cellOffsets_.push_back( Vector3i(-1, 1, 1) );
80 +    cellOffsets_.push_back( Vector3i( 0, 1, 1) );
81 +    cellOffsets_.push_back( Vector3i( 1, 1, 1) );
82 +  }
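  // Illustrative sketch (not part of the original source): the 27 offsets
  // above enumerate the full 3x3x3 neighborhood of a cell, including the
  // central (0,0,0) cell.  An equivalent way to build the same list, in the
  // same order, would be a triple loop with z varying slowest:
  //
  //   for (int k = -1; k <= 1; k++)
  //     for (int j = -1; j <= 1; j++)
  //       for (int i = -1; i <= 1; i++)
  //         cellOffsets_.push_back( Vector3i(i, j, k) );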
83 +
84 +
85    /**
86     * distributeInitialData is essentially a copy of the older Fortran
87     * SimulationSetup
88     */
54  
89    void ForceMatrixDecomposition::distributeInitialData() {
90      snap_ = sman_->getCurrentSnapshot();
91      storageLayout_ = sman_->getStorageLayout();
92      ff_ = info_->getForceField();
93      nLocal_ = snap_->getNumberOfAtoms();
94 <    nGroups_ = snap_->getNumberOfCutoffGroups();
95 <
94 >  
95 >    nGroups_ = info_->getNLocalCutoffGroups();
96      // gather the information for atomtype IDs (atids):
97 <    identsLocal = info_->getIdentArray();
97 >    idents = info_->getIdentArray();
98 >    regions = info_->getRegions();
99      AtomLocalToGlobal = info_->getGlobalAtomIndices();
100      cgLocalToGlobal = info_->getGlobalGroupIndices();
101      vector<int> globalGroupMembership = info_->getGlobalGroupMembership();
67    vector<RealType> massFactorsLocal = info_->getMassFactors();
68    PairList excludes = info_->getExcludedInteractions();
69    PairList oneTwo = info_->getOneTwoInteractions();
70    PairList oneThree = info_->getOneThreeInteractions();
71    PairList oneFour = info_->getOneFourInteractions();
102  
103 +    massFactors = info_->getMassFactors();
104 +
105 +    PairList* excludes = info_->getExcludedInteractions();
106 +    PairList* oneTwo = info_->getOneTwoInteractions();
107 +    PairList* oneThree = info_->getOneThreeInteractions();
108 +    PairList* oneFour = info_->getOneFourInteractions();
109 +    
110 +    if (needVelocities_)
111 +      snap_->cgData.setStorageLayout(DataStorage::dslPosition |
112 +                                     DataStorage::dslVelocity);
113 +    else
114 +      snap_->cgData.setStorageLayout(DataStorage::dslPosition);
115 +    
116   #ifdef IS_MPI
117  
118 <    AtomCommIntRow = new Communicator<Row,int>(nLocal_);
119 <    AtomCommRealRow = new Communicator<Row,RealType>(nLocal_);
77 <    AtomCommVectorRow = new Communicator<Row,Vector3d>(nLocal_);
78 <    AtomCommMatrixRow = new Communicator<Row,Mat3x3d>(nLocal_);
79 <    AtomCommPotRow = new Communicator<Row,potVec>(nLocal_);
118 >    MPI_Comm row = rowComm.getComm();
119 >    MPI_Comm col = colComm.getComm();
120  
121 <    AtomCommIntColumn = new Communicator<Column,int>(nLocal_);
122 <    AtomCommRealColumn = new Communicator<Column,RealType>(nLocal_);
123 <    AtomCommVectorColumn = new Communicator<Column,Vector3d>(nLocal_);
124 <    AtomCommMatrixColumn = new Communicator<Column,Mat3x3d>(nLocal_);
125 <    AtomCommPotColumn = new Communicator<Column,potVec>(nLocal_);
121 >    AtomPlanIntRow = new Plan<int>(row, nLocal_);
122 >    AtomPlanRealRow = new Plan<RealType>(row, nLocal_);
123 >    AtomPlanVectorRow = new Plan<Vector3d>(row, nLocal_);
124 >    AtomPlanMatrixRow = new Plan<Mat3x3d>(row, nLocal_);
125 >    AtomPlanPotRow = new Plan<potVec>(row, nLocal_);
126  
127 <    cgCommIntRow = new Communicator<Row,int>(nGroups_);
128 <    cgCommVectorRow = new Communicator<Row,Vector3d>(nGroups_);
129 <    cgCommIntColumn = new Communicator<Column,int>(nGroups_);
130 <    cgCommVectorColumn = new Communicator<Column,Vector3d>(nGroups_);
127 >    AtomPlanIntColumn = new Plan<int>(col, nLocal_);
128 >    AtomPlanRealColumn = new Plan<RealType>(col, nLocal_);
129 >    AtomPlanVectorColumn = new Plan<Vector3d>(col, nLocal_);
130 >    AtomPlanMatrixColumn = new Plan<Mat3x3d>(col, nLocal_);
131 >    AtomPlanPotColumn = new Plan<potVec>(col, nLocal_);
132  
133 <    nAtomsInRow_ = AtomCommIntRow->getSize();
134 <    nAtomsInCol_ = AtomCommIntColumn->getSize();
135 <    nGroupsInRow_ = cgCommIntRow->getSize();
136 <    nGroupsInCol_ = cgCommIntColumn->getSize();
133 >    cgPlanIntRow = new Plan<int>(row, nGroups_);
134 >    cgPlanVectorRow = new Plan<Vector3d>(row, nGroups_);
135 >    cgPlanIntColumn = new Plan<int>(col, nGroups_);
136 >    cgPlanVectorColumn = new Plan<Vector3d>(col, nGroups_);
137  
138 +    nAtomsInRow_ = AtomPlanIntRow->getSize();
139 +    nAtomsInCol_ = AtomPlanIntColumn->getSize();
140 +    nGroupsInRow_ = cgPlanIntRow->getSize();
141 +    nGroupsInCol_ = cgPlanIntColumn->getSize();
142 +
143      // Modify the data storage objects with the correct layouts and sizes:
144      atomRowData.resize(nAtomsInRow_);
145      atomRowData.setStorageLayout(storageLayout_);
# Line 102 | Line 148 | namespace OpenMD {
148      cgRowData.resize(nGroupsInRow_);
149      cgRowData.setStorageLayout(DataStorage::dslPosition);
150      cgColData.resize(nGroupsInCol_);
151 <    cgColData.setStorageLayout(DataStorage::dslPosition);
152 <        
153 <    identsRow.reserve(nAtomsInRow_);
154 <    identsCol.reserve(nAtomsInCol_);
151 >    if (needVelocities_)
152 >      // column velocities only need to be stored when velocities are required.
153 >      cgColData.setStorageLayout(DataStorage::dslPosition |
154 >                                 DataStorage::dslVelocity);
155 >    else    
156 >      cgColData.setStorageLayout(DataStorage::dslPosition);
157 >      
158 >    identsRow.resize(nAtomsInRow_);
159 >    identsCol.resize(nAtomsInCol_);
160      
161 <    AtomCommIntRow->gather(identsLocal, identsRow);
162 <    AtomCommIntColumn->gather(identsLocal, identsCol);
161 >    AtomPlanIntRow->gather(idents, identsRow);
162 >    AtomPlanIntColumn->gather(idents, identsCol);
163 >
164 >    regionsRow.resize(nAtomsInRow_);
165 >    regionsCol.resize(nAtomsInCol_);
166      
167 <    AtomCommIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
168 <    AtomCommIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);
167 >    AtomPlanIntRow->gather(regions, regionsRow);
168 >    AtomPlanIntColumn->gather(regions, regionsCol);
169      
170 <    cgCommIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
171 <    cgCommIntColumn->gather(cgLocalToGlobal, cgColToGlobal);
170 >    // allocate memory for the parallel objects
171 >    atypesRow.resize(nAtomsInRow_);
172 >    atypesCol.resize(nAtomsInCol_);
173  
174 <    AtomCommRealRow->gather(massFactorsLocal, massFactorsRow);
175 <    AtomCommRealColumn->gather(massFactorsLocal, massFactorsCol);
174 >    for (int i = 0; i < nAtomsInRow_; i++)
175 >      atypesRow[i] = ff_->getAtomType(identsRow[i]);
176 >    for (int i = 0; i < nAtomsInCol_; i++)
177 >      atypesCol[i] = ff_->getAtomType(identsCol[i]);        
178  
179 +    pot_row.resize(nAtomsInRow_);
180 +    pot_col.resize(nAtomsInCol_);
181 +
182 +    expot_row.resize(nAtomsInRow_);
183 +    expot_col.resize(nAtomsInCol_);
184 +
185 +    AtomRowToGlobal.resize(nAtomsInRow_);
186 +    AtomColToGlobal.resize(nAtomsInCol_);
187 +    AtomPlanIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
188 +    AtomPlanIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);
189 +
190 +    cgRowToGlobal.resize(nGroupsInRow_);
191 +    cgColToGlobal.resize(nGroupsInCol_);
192 +    cgPlanIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
193 +    cgPlanIntColumn->gather(cgLocalToGlobal, cgColToGlobal);
194 +
195 +    massFactorsRow.resize(nAtomsInRow_);
196 +    massFactorsCol.resize(nAtomsInCol_);
197 +    AtomPlanRealRow->gather(massFactors, massFactorsRow);
198 +    AtomPlanRealColumn->gather(massFactors, massFactorsCol);
199 +
200      groupListRow_.clear();
201 <    groupListRow_.reserve(nGroupsInRow_);
201 >    groupListRow_.resize(nGroupsInRow_);
202      for (int i = 0; i < nGroupsInRow_; i++) {
203        int gid = cgRowToGlobal[i];
204        for (int j = 0; j < nAtomsInRow_; j++) {
# Line 131 | Line 209 | namespace OpenMD {
209      }
210  
211      groupListCol_.clear();
212 <    groupListCol_.reserve(nGroupsInCol_);
212 >    groupListCol_.resize(nGroupsInCol_);
213      for (int i = 0; i < nGroupsInCol_; i++) {
214        int gid = cgColToGlobal[i];
215        for (int j = 0; j < nAtomsInCol_; j++) {
# Line 141 | Line 219 | namespace OpenMD {
219        }      
220      }
221  
222 <    skipsForRowAtom.clear();
223 <    skipsForRowAtom.reserve(nAtomsInRow_);
222 >    excludesForAtom.clear();
223 >    excludesForAtom.resize(nAtomsInRow_);
224 >    toposForAtom.clear();
225 >    toposForAtom.resize(nAtomsInRow_);
226 >    topoDist.clear();
227 >    topoDist.resize(nAtomsInRow_);
228      for (int i = 0; i < nAtomsInRow_; i++) {
229        int iglob = AtomRowToGlobal[i];
230 +
231        for (int j = 0; j < nAtomsInCol_; j++) {
232 <        int jglob = AtomColToGlobal[j];        
233 <        if (excludes.hasPair(iglob, jglob))
234 <          skipsForRowAtom[i].push_back(j);      
232 >        int jglob = AtomColToGlobal[j];
233 >
234 >        if (excludes->hasPair(iglob, jglob))
235 >          excludesForAtom[i].push_back(j);      
236 >        
237 >        if (oneTwo->hasPair(iglob, jglob)) {
238 >          toposForAtom[i].push_back(j);
239 >          topoDist[i].push_back(1);
240 >        } else {
241 >          if (oneThree->hasPair(iglob, jglob)) {
242 >            toposForAtom[i].push_back(j);
243 >            topoDist[i].push_back(2);
244 >          } else {
245 >            if (oneFour->hasPair(iglob, jglob)) {
246 >              toposForAtom[i].push_back(j);
247 >              topoDist[i].push_back(3);
248 >            }
249 >          }
250 >        }
251        }      
252      }
253  
254 <    toposForRowAtom.clear();
255 <    toposForRowAtom.reserve(nAtomsInRow_);
256 <    for (int i = 0; i < nAtomsInRow_; i++) {
257 <      int iglob = AtomRowToGlobal[i];
258 <      int nTopos = 0;
259 <      for (int j = 0; j < nAtomsInCol_; j++) {
260 <        int jglob = AtomColToGlobal[j];        
261 <        if (oneTwo.hasPair(iglob, jglob)) {
262 <          toposForRowAtom[i].push_back(j);
263 <          topoDistRow[i][nTopos] = 1;
264 <          nTopos++;
265 <        }
266 <        if (oneThree.hasPair(iglob, jglob)) {
267 <          toposForRowAtom[i].push_back(j);
268 <          topoDistRow[i][nTopos] = 2;
269 <          nTopos++;
270 <        }
271 <        if (oneFour.hasPair(iglob, jglob)) {
272 <          toposForRowAtom[i].push_back(j);
273 <          topoDistRow[i][nTopos] = 3;
274 <          nTopos++;
254 > #else
255 >    excludesForAtom.clear();
256 >    excludesForAtom.resize(nLocal_);
257 >    toposForAtom.clear();
258 >    toposForAtom.resize(nLocal_);
259 >    topoDist.clear();
260 >    topoDist.resize(nLocal_);
261 >
262 >    for (int i = 0; i < nLocal_; i++) {
263 >      int iglob = AtomLocalToGlobal[i];
264 >
265 >      for (int j = 0; j < nLocal_; j++) {
266 >        int jglob = AtomLocalToGlobal[j];
267 >
268 >        if (excludes->hasPair(iglob, jglob))
269 >          excludesForAtom[i].push_back(j);              
270 >        
271 >        if (oneTwo->hasPair(iglob, jglob)) {
272 >          toposForAtom[i].push_back(j);
273 >          topoDist[i].push_back(1);
274 >        } else {
275 >          if (oneThree->hasPair(iglob, jglob)) {
276 >            toposForAtom[i].push_back(j);
277 >            topoDist[i].push_back(2);
278 >          } else {
279 >            if (oneFour->hasPair(iglob, jglob)) {
280 >              toposForAtom[i].push_back(j);
281 >              topoDist[i].push_back(3);
282 >            }
283 >          }
284          }
285        }      
286      }
179
287   #endif
288  
289 +    // allocate memory for the parallel objects
290 +    atypesLocal.resize(nLocal_);
291 +
292 +    for (int i = 0; i < nLocal_; i++)
293 +      atypesLocal[i] = ff_->getAtomType(idents[i]);
294 +
295      groupList_.clear();
296 <    groupList_.reserve(nGroups_);
296 >    groupList_.resize(nGroups_);
297      for (int i = 0; i < nGroups_; i++) {
298        int gid = cgLocalToGlobal[i];
299        for (int j = 0; j < nLocal_; j++) {
300          int aid = AtomLocalToGlobal[j];
301 <        if (globalGroupMembership[aid] == gid)
301 >        if (globalGroupMembership[aid] == gid) {
302            groupList_[i].push_back(j);
190      }      
191    }
192
193    skipsForLocalAtom.clear();
194    skipsForLocalAtom.reserve(nLocal_);
195
196    for (int i = 0; i < nLocal_; i++) {
197      int iglob = AtomLocalToGlobal[i];
198      for (int j = 0; j < nLocal_; j++) {
199        int jglob = AtomLocalToGlobal[j];        
200        if (excludes.hasPair(iglob, jglob))
201          skipsForLocalAtom[i].push_back(j);      
202      }      
203    }
204
205    toposForLocalAtom.clear();
206    toposForLocalAtom.reserve(nLocal_);
207    for (int i = 0; i < nLocal_; i++) {
208      int iglob = AtomLocalToGlobal[i];
209      int nTopos = 0;
210      for (int j = 0; j < nLocal_; j++) {
211        int jglob = AtomLocalToGlobal[j];        
212        if (oneTwo.hasPair(iglob, jglob)) {
213          toposForLocalAtom[i].push_back(j);
214          topoDistLocal[i][nTopos] = 1;
215          nTopos++;
303          }
217        if (oneThree.hasPair(iglob, jglob)) {
218          toposForLocalAtom[i].push_back(j);
219          topoDistLocal[i][nTopos] = 2;
220          nTopos++;
221        }
222        if (oneFour.hasPair(iglob, jglob)) {
223          toposForLocalAtom[i].push_back(j);
224          topoDistLocal[i][nTopos] = 3;
225          nTopos++;
226        }
304        }      
305 <    }
305 >    }    
306    }
307 <  
307 >    
308 >  int ForceMatrixDecomposition::getTopologicalDistance(int atom1, int atom2) {
309 >    for (unsigned int j = 0; j < toposForAtom[atom1].size(); j++) {
310 >      if (toposForAtom[atom1][j] == atom2)
311 >        return topoDist[atom1][j];
312 >    }                                          
313 >    return 0;
314 >  }
315 >
316    void ForceMatrixDecomposition::zeroWorkArrays() {
317 +    pairwisePot = 0.0;
318 +    embeddingPot = 0.0;
319 +    excludedPot = 0.0;
320 +    excludedSelfPot = 0.0;
321  
233    for (int j = 0; j < N_INTERACTION_FAMILIES; j++) {
234      longRangePot_[j] = 0.0;
235    }
236
322   #ifdef IS_MPI
323      if (storageLayout_ & DataStorage::dslForce) {
324        fill(atomRowData.force.begin(), atomRowData.force.end(), V3Zero);
# Line 249 | Line 334 | namespace OpenMD {
334           Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
335  
336      fill(pot_col.begin(), pot_col.end(),
337 +         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));  
338 +
339 +    fill(expot_row.begin(), expot_row.end(),
340           Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
253    
254    pot_local = Vector<RealType, N_INTERACTION_FAMILIES>(0.0);
341  
342 +    fill(expot_col.begin(), expot_col.end(),
343 +         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));  
344 +
345      if (storageLayout_ & DataStorage::dslParticlePot) {    
346 <      fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(), 0.0);
347 <      fill(atomColData.particlePot.begin(), atomColData.particlePot.end(), 0.0);
346 >      fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(),
347 >           0.0);
348 >      fill(atomColData.particlePot.begin(), atomColData.particlePot.end(),
349 >           0.0);
350      }
351  
352      if (storageLayout_ & DataStorage::dslDensity) {      
# Line 264 | Line 355 | namespace OpenMD {
355      }
356  
357      if (storageLayout_ & DataStorage::dslFunctional) {  
358 <      fill(atomRowData.functional.begin(), atomRowData.functional.end(), 0.0);
359 <      fill(atomColData.functional.begin(), atomColData.functional.end(), 0.0);
358 >      fill(atomRowData.functional.begin(), atomRowData.functional.end(),
359 >           0.0);
360 >      fill(atomColData.functional.begin(), atomColData.functional.end(),
361 >           0.0);
362      }
363  
364      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {      
# Line 275 | Line 368 | namespace OpenMD {
368             atomColData.functionalDerivative.end(), 0.0);
369      }
370  
371 < #else
372 <    
371 >    if (storageLayout_ & DataStorage::dslSkippedCharge) {      
372 >      fill(atomRowData.skippedCharge.begin(),
373 >           atomRowData.skippedCharge.end(), 0.0);
374 >      fill(atomColData.skippedCharge.begin(),
375 >           atomColData.skippedCharge.end(), 0.0);
376 >    }
377 >
378 >    if (storageLayout_ & DataStorage::dslFlucQForce) {      
379 >      fill(atomRowData.flucQFrc.begin(),
380 >           atomRowData.flucQFrc.end(), 0.0);
381 >      fill(atomColData.flucQFrc.begin(),
382 >           atomColData.flucQFrc.end(), 0.0);
383 >    }
384 >
385 >    if (storageLayout_ & DataStorage::dslElectricField) {    
386 >      fill(atomRowData.electricField.begin(),
387 >           atomRowData.electricField.end(), V3Zero);
388 >      fill(atomColData.electricField.begin(),
389 >           atomColData.electricField.end(), V3Zero);
390 >    }
391 >
392 >    if (storageLayout_ & DataStorage::dslSitePotential) {    
393 >      fill(atomRowData.sitePotential.begin(),
394 >           atomRowData.sitePotential.end(), 0.0);
395 >      fill(atomColData.sitePotential.begin(),
396 >           atomColData.sitePotential.end(), 0.0);
397 >    }
398 >
399 > #endif
400 >    // even in parallel, we need to zero out the local arrays:
401 >
402      if (storageLayout_ & DataStorage::dslParticlePot) {      
403        fill(snap_->atomData.particlePot.begin(),
404             snap_->atomData.particlePot.end(), 0.0);
# Line 286 | Line 408 | namespace OpenMD {
408        fill(snap_->atomData.density.begin(),
409             snap_->atomData.density.end(), 0.0);
410      }
411 +
412      if (storageLayout_ & DataStorage::dslFunctional) {
413        fill(snap_->atomData.functional.begin(),
414             snap_->atomData.functional.end(), 0.0);
415      }
416 +
417      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {      
418        fill(snap_->atomData.functionalDerivative.begin(),
419             snap_->atomData.functionalDerivative.end(), 0.0);
420      }
421 < #endif
422 <    
421 >
422 >    if (storageLayout_ & DataStorage::dslSkippedCharge) {      
423 >      fill(snap_->atomData.skippedCharge.begin(),
424 >           snap_->atomData.skippedCharge.end(), 0.0);
425 >    }
426 >
427 >    if (storageLayout_ & DataStorage::dslElectricField) {      
428 >      fill(snap_->atomData.electricField.begin(),
429 >           snap_->atomData.electricField.end(), V3Zero);
430 >    }
431 >    if (storageLayout_ & DataStorage::dslSitePotential) {      
432 >      fill(snap_->atomData.sitePotential.begin(),
433 >           snap_->atomData.sitePotential.end(), 0.0);
434 >    }
435    }
436  
437  
438    void ForceMatrixDecomposition::distributeData()  {
439 +  
440 + #ifdef IS_MPI
441 +
442      snap_ = sman_->getCurrentSnapshot();
443      storageLayout_ = sman_->getStorageLayout();
305 #ifdef IS_MPI
444      
445 +    bool needsCG = true;
446 +    if(info_->getNCutoffGroups() != info_->getNAtoms())
447 +      needsCG = false;
448 +
449      // gather up the atomic positions
450 <    AtomCommVectorRow->gather(snap_->atomData.position,
450 >    AtomPlanVectorRow->gather(snap_->atomData.position,
451                                atomRowData.position);
452 <    AtomCommVectorColumn->gather(snap_->atomData.position,
452 >    AtomPlanVectorColumn->gather(snap_->atomData.position,
453                                   atomColData.position);
454      
455      // gather up the cutoff group positions
456 <    cgCommVectorRow->gather(snap_->cgData.position,
457 <                            cgRowData.position);
458 <    cgCommVectorColumn->gather(snap_->cgData.position,
459 <                               cgColData.position);
456 >
457 >    if (needsCG) {
458 >      cgPlanVectorRow->gather(snap_->cgData.position,
459 >                              cgRowData.position);
460 >      
461 >      cgPlanVectorColumn->gather(snap_->cgData.position,
462 >                                 cgColData.position);
463 >    }
464 >
465 >
466 >    if (needVelocities_) {
467 >      // gather up the atomic velocities
468 >      AtomPlanVectorColumn->gather(snap_->atomData.velocity,
469 >                                   atomColData.velocity);
470 >
471 >      if (needsCG) {        
472 >        cgPlanVectorColumn->gather(snap_->cgData.velocity,
473 >                                   cgColData.velocity);
474 >      }
475 >    }
476 >
477      
478      // if needed, gather the atomic rotation matrices
479      if (storageLayout_ & DataStorage::dslAmat) {
480 <      AtomCommMatrixRow->gather(snap_->atomData.aMat,
480 >      AtomPlanMatrixRow->gather(snap_->atomData.aMat,
481                                  atomRowData.aMat);
482 <      AtomCommMatrixColumn->gather(snap_->atomData.aMat,
482 >      AtomPlanMatrixColumn->gather(snap_->atomData.aMat,
483                                     atomColData.aMat);
484      }
485 <    
486 <    // if needed, gather the atomic electrostatic frames
487 <    if (storageLayout_ & DataStorage::dslElectroFrame) {
488 <      AtomCommMatrixRow->gather(snap_->atomData.electroFrame,
489 <                                atomRowData.electroFrame);
490 <      AtomCommMatrixColumn->gather(snap_->atomData.electroFrame,
491 <                                   atomColData.electroFrame);
485 >
486 >    // if needed, gather the atomic electrostatic information
487 >    if (storageLayout_ & DataStorage::dslDipole) {
488 >      AtomPlanVectorRow->gather(snap_->atomData.dipole,
489 >                                atomRowData.dipole);
490 >      AtomPlanVectorColumn->gather(snap_->atomData.dipole,
491 >                                   atomColData.dipole);
492      }
493 +
494 +    if (storageLayout_ & DataStorage::dslQuadrupole) {
495 +      AtomPlanMatrixRow->gather(snap_->atomData.quadrupole,
496 +                                atomRowData.quadrupole);
497 +      AtomPlanMatrixColumn->gather(snap_->atomData.quadrupole,
498 +                                   atomColData.quadrupole);
499 +    }
500 +        
501 +    // if needed, gather the atomic fluctuating charge values
502 +    if (storageLayout_ & DataStorage::dslFlucQPosition) {
503 +      AtomPlanRealRow->gather(snap_->atomData.flucQPos,
504 +                              atomRowData.flucQPos);
505 +      AtomPlanRealColumn->gather(snap_->atomData.flucQPos,
506 +                                 atomColData.flucQPos);
507 +    }
508 +
509   #endif      
510    }
511    
# Line 344 | Line 519 | namespace OpenMD {
519      
520      if (storageLayout_ & DataStorage::dslDensity) {
521        
522 <      AtomCommRealRow->scatter(atomRowData.density,
522 >      AtomPlanRealRow->scatter(atomRowData.density,
523                                 snap_->atomData.density);
524        
525        int n = snap_->atomData.density.size();
526        vector<RealType> rho_tmp(n, 0.0);
527 <      AtomCommRealColumn->scatter(atomColData.density, rho_tmp);
527 >      AtomPlanRealColumn->scatter(atomColData.density, rho_tmp);
528        for (int i = 0; i < n; i++)
529          snap_->atomData.density[i] += rho_tmp[i];
530      }
531 +
532 +    // this isn't necessary if we don't have polarizable atoms, but
533 +    // we'll leave it here for now.
534 +    if (storageLayout_ & DataStorage::dslElectricField) {
535 +      
536 +      AtomPlanVectorRow->scatter(atomRowData.electricField,
537 +                                 snap_->atomData.electricField);
538 +      
539 +      int n = snap_->atomData.electricField.size();
540 +      vector<Vector3d> field_tmp(n, V3Zero);
541 +      AtomPlanVectorColumn->scatter(atomColData.electricField,
542 +                                    field_tmp);
543 +      for (int i = 0; i < n; i++)
544 +        snap_->atomData.electricField[i] += field_tmp[i];
545 +    }
546   #endif
547    }
548  
# Line 365 | Line 555 | namespace OpenMD {
555      storageLayout_ = sman_->getStorageLayout();
556   #ifdef IS_MPI
557      if (storageLayout_ & DataStorage::dslFunctional) {
558 <      AtomCommRealRow->gather(snap_->atomData.functional,
558 >      AtomPlanRealRow->gather(snap_->atomData.functional,
559                                atomRowData.functional);
560 <      AtomCommRealColumn->gather(snap_->atomData.functional,
560 >      AtomPlanRealColumn->gather(snap_->atomData.functional,
561                                   atomColData.functional);
562      }
563      
564      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
565 <      AtomCommRealRow->gather(snap_->atomData.functionalDerivative,
565 >      AtomPlanRealRow->gather(snap_->atomData.functionalDerivative,
566                                atomRowData.functionalDerivative);
567 <      AtomCommRealColumn->gather(snap_->atomData.functionalDerivative,
567 >      AtomPlanRealColumn->gather(snap_->atomData.functionalDerivative,
568                                   atomColData.functionalDerivative);
569      }
570   #endif
# Line 388 | Line 578 | namespace OpenMD {
578      int n = snap_->atomData.force.size();
579      vector<Vector3d> frc_tmp(n, V3Zero);
580      
581 <    AtomCommVectorRow->scatter(atomRowData.force, frc_tmp);
581 >    AtomPlanVectorRow->scatter(atomRowData.force, frc_tmp);
582      for (int i = 0; i < n; i++) {
583        snap_->atomData.force[i] += frc_tmp[i];
584        frc_tmp[i] = 0.0;
585      }
586      
587 <    AtomCommVectorColumn->scatter(atomColData.force, frc_tmp);
588 <    for (int i = 0; i < n; i++)
587 >    AtomPlanVectorColumn->scatter(atomColData.force, frc_tmp);
588 >    for (int i = 0; i < n; i++) {
589        snap_->atomData.force[i] += frc_tmp[i];
590 <    
591 <    
590 >    }
591 >        
592      if (storageLayout_ & DataStorage::dslTorque) {
593  
594 <      int nt = snap_->atomData.force.size();
594 >      int nt = snap_->atomData.torque.size();
595        vector<Vector3d> trq_tmp(nt, V3Zero);
596  
597 <      AtomCommVectorRow->scatter(atomRowData.torque, trq_tmp);
598 <      for (int i = 0; i < n; i++) {
597 >      AtomPlanVectorRow->scatter(atomRowData.torque, trq_tmp);
598 >      for (int i = 0; i < nt; i++) {
599          snap_->atomData.torque[i] += trq_tmp[i];
600          trq_tmp[i] = 0.0;
601        }
602        
603 <      AtomCommVectorColumn->scatter(atomColData.torque, trq_tmp);
604 <      for (int i = 0; i < n; i++)
603 >      AtomPlanVectorColumn->scatter(atomColData.torque, trq_tmp);
604 >      for (int i = 0; i < nt; i++)
605          snap_->atomData.torque[i] += trq_tmp[i];
606      }
607 +
608 +    if (storageLayout_ & DataStorage::dslSkippedCharge) {
609 +
610 +      int ns = snap_->atomData.skippedCharge.size();
611 +      vector<RealType> skch_tmp(ns, 0.0);
612 +
613 +      AtomPlanRealRow->scatter(atomRowData.skippedCharge, skch_tmp);
614 +      for (int i = 0; i < ns; i++) {
615 +        snap_->atomData.skippedCharge[i] += skch_tmp[i];
616 +        skch_tmp[i] = 0.0;
617 +      }
618 +      
619 +      AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp);
620 +      for (int i = 0; i < ns; i++)
621 +        snap_->atomData.skippedCharge[i] += skch_tmp[i];
622 +            
623 +    }
624      
625 +    if (storageLayout_ & DataStorage::dslFlucQForce) {
626 +
627 +      int nq = snap_->atomData.flucQFrc.size();
628 +      vector<RealType> fqfrc_tmp(nq, 0.0);
629 +
630 +      AtomPlanRealRow->scatter(atomRowData.flucQFrc, fqfrc_tmp);
631 +      for (int i = 0; i < nq; i++) {
632 +        snap_->atomData.flucQFrc[i] += fqfrc_tmp[i];
633 +        fqfrc_tmp[i] = 0.0;
634 +      }
635 +      
636 +      AtomPlanRealColumn->scatter(atomColData.flucQFrc, fqfrc_tmp);
637 +      for (int i = 0; i < nq; i++)
638 +        snap_->atomData.flucQFrc[i] += fqfrc_tmp[i];
639 +            
640 +    }
641 +
642 +    if (storageLayout_ & DataStorage::dslElectricField) {
643 +
644 +      int nef = snap_->atomData.electricField.size();
645 +      vector<Vector3d> efield_tmp(nef, V3Zero);
646 +
647 +      AtomPlanVectorRow->scatter(atomRowData.electricField, efield_tmp);
648 +      for (int i = 0; i < nef; i++) {
649 +        snap_->atomData.electricField[i] += efield_tmp[i];
650 +        efield_tmp[i] = 0.0;
651 +      }
652 +      
653 +      AtomPlanVectorColumn->scatter(atomColData.electricField, efield_tmp);
654 +      for (int i = 0; i < nef; i++)
655 +        snap_->atomData.electricField[i] += efield_tmp[i];
656 +    }
657 +
658 +    if (storageLayout_ & DataStorage::dslSitePotential) {
659 +
660 +      int nsp = snap_->atomData.sitePotential.size();
661 +      vector<RealType> sp_tmp(nsp, 0.0);
662 +
663 +      AtomPlanRealRow->scatter(atomRowData.sitePotential, sp_tmp);
664 +      for (int i = 0; i < nsp; i++) {
665 +        snap_->atomData.sitePotential[i] += sp_tmp[i];
666 +        sp_tmp[i] = 0.0;
667 +      }
668 +      
669 +      AtomPlanRealColumn->scatter(atomColData.sitePotential, sp_tmp);
670 +      for (int i = 0; i < nsp; i++)
671 +        snap_->atomData.sitePotential[i] += sp_tmp[i];
672 +    }
673 +
674      nLocal_ = snap_->getNumberOfAtoms();
675  
676      vector<potVec> pot_temp(nLocal_,
677                              Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
678 +    vector<potVec> expot_temp(nLocal_,
679 +                              Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
680  
681      // scatter/gather pot_row into the members of my column
682            
683 <    AtomCommPotRow->scatter(pot_row, pot_temp);
683 >    AtomPlanPotRow->scatter(pot_row, pot_temp);
684 >    AtomPlanPotRow->scatter(expot_row, expot_temp);
685  
686 <    for (int ii = 0;  ii < pot_temp.size(); ii++ )
687 <      pot_local += pot_temp[ii];
688 <    
686 >    for (int ii = 0;  ii < pot_temp.size(); ii++ )
687 >      pairwisePot += pot_temp[ii];
688 >
689 >    for (int ii = 0;  ii < expot_temp.size(); ii++ )
690 >      excludedPot += expot_temp[ii];
691 >        
692 >    if (storageLayout_ & DataStorage::dslParticlePot) {
693 >      // This is the pairwise contribution to the particle pot.  The
694 >      // embedding contribution is added in each of the low level
695 >      // non-bonded routines.  In single processor, this is done in
696 >      // unpackInteractionData, not in collectData.
697 >      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
698 >        for (int i = 0; i < nLocal_; i++) {
699 >          // factor of two is because the total potential terms are divided
700 >          // by 2 in parallel due to row/ column scatter      
701 >          snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii);
702 >        }
703 >      }
704 >    }
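    // Bookkeeping sketch (not part of the original source): in parallel,
    // unpackInteractionData deposits half of each pair energy on the row
    // atom and half on the column atom (pot_row[atom1] += 0.5 * pot, and
    // likewise for pot_col[atom2]).  After the scatters above, each local
    // atom therefore holds 0.5 * v for every pair it participates in, while
    // the serial path gives each partner the full vpair * sw.  The 2.0
    // factor restores that convention:
    //
    //   particlePot[i] = 2.0 * sum_pairs( 0.5 * v_pair ) = sum_pairs( v_pair )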
705 >
706      fill(pot_temp.begin(), pot_temp.end(),
707           Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
708 +    fill(expot_temp.begin(), expot_temp.end(),
709 +         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
710        
711 <    AtomCommPotColumn->scatter(pot_col, pot_temp);    
711 >    AtomPlanPotColumn->scatter(pot_col, pot_temp);    
712 >    AtomPlanPotColumn->scatter(expot_col, expot_temp);    
713      
714      for (int ii = 0;  ii < pot_temp.size(); ii++ )
715 <      pot_local += pot_temp[ii];
715 >      pairwisePot += pot_temp[ii];    
716 >
717 >    for (int ii = 0;  ii < expot_temp.size(); ii++ )
718 >      excludedPot += expot_temp[ii];    
719 >
720 >    if (storageLayout_ & DataStorage::dslParticlePot) {
721 >      // This is the pairwise contribution to the particle pot.  The
722 >      // embedding contribution is added in each of the low level
723 >      // non-bonded routines.  In single processor, this is done in
724 >      // unpackInteractionData, not in collectData.
725 >      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
726 >        for (int i = 0; i < nLocal_; i++) {
727 >          // factor of two is because the total potential terms are divided
728 >          // by 2 in parallel due to row/ column scatter      
729 >          snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii);
730 >        }
731 >      }
732 >    }
733      
734 +    if (storageLayout_ & DataStorage::dslParticlePot) {
735 +      int npp = snap_->atomData.particlePot.size();
736 +      vector<RealType> ppot_temp(npp, 0.0);
737 +
738 +      // This is the direct or embedding contribution to the particle
739 +      // pot.
740 +      
741 +      AtomPlanRealRow->scatter(atomRowData.particlePot, ppot_temp);
742 +      for (int i = 0; i < npp; i++) {
743 +        snap_->atomData.particlePot[i] += ppot_temp[i];
744 +      }
745 +
746 +      fill(ppot_temp.begin(), ppot_temp.end(), 0.0);
747 +      
748 +      AtomPlanRealColumn->scatter(atomColData.particlePot, ppot_temp);
749 +      for (int i = 0; i < npp; i++) {
750 +        snap_->atomData.particlePot[i] += ppot_temp[i];
751 +      }
752 +    }
753 +
754 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
755 +      RealType ploc1 = pairwisePot[ii];
756 +      RealType ploc2 = 0.0;
757 +      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
758 +      pairwisePot[ii] = ploc2;
759 +    }
760 +
761 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
762 +      RealType ploc1 = excludedPot[ii];
763 +      RealType ploc2 = 0.0;
764 +      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
765 +      excludedPot[ii] = ploc2;
766 +    }
767 +
768 +    // Here be dragons.
769 +    MPI_Comm col = colComm.getComm();
770 +
771 +    MPI_Allreduce(MPI_IN_PLACE,
772 +                  &snap_->frameData.conductiveHeatFlux[0], 3,
773 +                  MPI_REALTYPE, MPI_SUM, col);
774 +
775 +
776   #endif
777 +
778    }
779  
780 <  int ForceMatrixDecomposition::getNAtomsInRow() {  
780 >  /**
781 >   * Collects information obtained during the post-pair (and embedding
782 >   * functional) loops onto local data structures.
783 >   */
784 >  void ForceMatrixDecomposition::collectSelfData() {
785 >    snap_ = sman_->getCurrentSnapshot();
786 >    storageLayout_ = sman_->getStorageLayout();
787 >
788   #ifdef IS_MPI
789 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
790 +      RealType ploc1 = embeddingPot[ii];
791 +      RealType ploc2 = 0.0;
792 +      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
793 +      embeddingPot[ii] = ploc2;
794 +    }    
795 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
796 +      RealType ploc1 = excludedSelfPot[ii];
797 +      RealType ploc2 = 0.0;
798 +      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
799 +      excludedSelfPot[ii] = ploc2;
800 +    }    
801 + #endif
802 +    
803 +  }
804 +
805 +
806 +
807 +  int& ForceMatrixDecomposition::getNAtomsInRow() {  
808 + #ifdef IS_MPI
809      return nAtomsInRow_;
810   #else
811      return nLocal_;
# Line 449 | Line 815 | namespace OpenMD {
815    /**
816     * returns the list of atoms belonging to this group.  
817     */
818 <  vector<int> ForceMatrixDecomposition::getAtomsInGroupRow(int cg1){
818 >  vector<int>& ForceMatrixDecomposition::getAtomsInGroupRow(int cg1){
819   #ifdef IS_MPI
820      return groupListRow_[cg1];
821   #else
# Line 457 | Line 823 | namespace OpenMD {
823   #endif
824    }
825  
826 <  vector<int> ForceMatrixDecomposition::getAtomsInGroupColumn(int cg2){
826 >  vector<int>& ForceMatrixDecomposition::getAtomsInGroupColumn(int cg2){
827   #ifdef IS_MPI
828      return groupListCol_[cg2];
829   #else
# Line 474 | Line 840 | namespace OpenMD {
840      d = snap_->cgData.position[cg2] - snap_->cgData.position[cg1];
841   #endif
842      
843 <    snap_->wrapVector(d);
843 >    if (usePeriodicBoundaryConditions_) {
844 >      snap_->wrapVector(d);
845 >    }
846      return d;    
847    }
848  
849 +  Vector3d& ForceMatrixDecomposition::getGroupVelocityColumn(int cg2){
850 + #ifdef IS_MPI
851 +    return cgColData.velocity[cg2];
852 + #else
853 +    return snap_->cgData.velocity[cg2];
854 + #endif
855 +  }
856  
857 +  Vector3d& ForceMatrixDecomposition::getAtomVelocityColumn(int atom2){
858 + #ifdef IS_MPI
859 +    return atomColData.velocity[atom2];
860 + #else
861 +    return snap_->atomData.velocity[atom2];
862 + #endif
863 +  }
864 +
865 +
866    Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){
867  
868      Vector3d d;
# Line 488 | Line 872 | namespace OpenMD {
872   #else
873      d = snap_->cgData.position[cg1] - snap_->atomData.position[atom1];
874   #endif
875 <
876 <    snap_->wrapVector(d);
875 >    if (usePeriodicBoundaryConditions_) {
876 >      snap_->wrapVector(d);
877 >    }
878      return d;    
879    }
880    
# Line 501 | Line 886 | namespace OpenMD {
886   #else
887      d = snap_->cgData.position[cg2] - snap_->atomData.position[atom2];
888   #endif
889 <    
890 <    snap_->wrapVector(d);
889 >    if (usePeriodicBoundaryConditions_) {
890 >      snap_->wrapVector(d);
891 >    }
892      return d;    
893    }
894  
895 <  RealType ForceMatrixDecomposition::getMassFactorRow(int atom1) {
895 >  RealType& ForceMatrixDecomposition::getMassFactorRow(int atom1) {
896   #ifdef IS_MPI
897      return massFactorsRow[atom1];
898   #else
899 <    return massFactorsLocal[atom1];
899 >    return massFactors[atom1];
900   #endif
901    }
902  
903 <  RealType ForceMatrixDecomposition::getMassFactorColumn(int atom2) {
903 >  RealType& ForceMatrixDecomposition::getMassFactorColumn(int atom2) {
904   #ifdef IS_MPI
905      return massFactorsCol[atom2];
906   #else
907 <    return massFactorsLocal[atom2];
907 >    return massFactors[atom2];
908   #endif
909  
910    }
# Line 531 | Line 917 | namespace OpenMD {
917   #else
918      d = snap_->atomData.position[atom2] - snap_->atomData.position[atom1];
919   #endif
920 <
921 <    snap_->wrapVector(d);
920 >    if (usePeriodicBoundaryConditions_) {
921 >      snap_->wrapVector(d);
922 >    }
923      return d;    
924    }
925  
926 <  vector<int> ForceMatrixDecomposition::getSkipsForRowAtom(int atom1) {
927 < #ifdef IS_MPI
541 <    return skipsForRowAtom[atom1];
542 < #else
543 <    return skipsForLocalAtom[atom1];
544 < #endif
926 >  vector<int>& ForceMatrixDecomposition::getExcludesForAtom(int atom1) {
927 >    return excludesForAtom[atom1];
928    }
929  
930    /**
931 <   * There are a number of reasons to skip a pair or a
549 <   * particle. Mostly we do this to exclude atoms who are involved in
550 <   * short range interactions (bonds, bends, torsions), but we also
551 <   * need to exclude some overcounted interactions that result from
931 >   * We need to exclude some overcounted interactions that result from
932     * the parallel decomposition.
933     */
934 <  bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) {
934 >  bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2, int cg1, int cg2) {
935      int unique_id_1, unique_id_2;
936 <
936 >        
937   #ifdef IS_MPI
938      // in MPI, we have to look up the unique IDs for each atom
939      unique_id_1 = AtomRowToGlobal[atom1];
940      unique_id_2 = AtomColToGlobal[atom2];
941 +    // group1 = cgRowToGlobal[cg1];
942 +    // group2 = cgColToGlobal[cg2];
943 + #else
944 +    unique_id_1 = AtomLocalToGlobal[atom1];
945 +    unique_id_2 = AtomLocalToGlobal[atom2];
946 +    int group1 = cgLocalToGlobal[cg1];
947 +    int group2 = cgLocalToGlobal[cg2];
948 + #endif  
949  
562    // this situation should only arise in MPI simulations
950      if (unique_id_1 == unique_id_2) return true;
951 <    
951 >
952 > #ifdef IS_MPI
953      // this prevents us from doing the pair on multiple processors
954      if (unique_id_1 < unique_id_2) {
955        if ((unique_id_1 + unique_id_2) % 2 == 0) return true;
956      } else {
957 <      if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
957 >      if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
958      }
959 < #else
960 <    // in the normal loop, the atom numbers are unique
961 <    unique_id_1 = atom1;
962 <    unique_id_2 = atom2;
959 > #endif    
960 >
961 > #ifndef IS_MPI
962 >    if (group1 == group2) {
963 >      if (unique_id_1 < unique_id_2) return true;
964 >    }
965   #endif
966      
967 < #ifdef IS_MPI
578 <    for (vector<int>::iterator i = skipsForRowAtom[atom1].begin();
579 <         i != skipsForRowAtom[atom1].end(); ++i) {
580 <      if ( (*i) == unique_id_2 ) return true;
581 <    }    
582 < #else
583 <    for (vector<int>::iterator i = skipsForLocalAtom[atom1].begin();
584 <         i != skipsForLocalAtom[atom1].end(); ++i) {
585 <      if ( (*i) == unique_id_2 ) return true;
586 <    }    
587 < #endif
967 >    return false;
968    }
969  
970 <  int ForceMatrixDecomposition::getTopoDistance(int atom1, int atom2) {
971 <    
972 < #ifdef IS_MPI
973 <    for (int i = 0; i < toposForRowAtom[atom1].size(); i++) {
974 <      if ( toposForRowAtom[atom1][i] == atom2 ) return topoDistRow[atom1][i];
975 <    }
976 < #else
977 <    for (int i = 0; i < toposForLocalAtom[atom1].size(); i++) {
978 <      if ( toposForLocalAtom[atom1][i] == atom2 ) return topoDistLocal[atom1][i];
970 >  /**
971 >   * We need to handle the interactions for atoms who are involved in
972 >   * the same rigid body as well as some short range interactions
973 >   * (bonds, bends, torsions) differently from other interactions.
974 >   * We'll still visit the pairwise routines, but with a flag that
975 >   * tells those routines to exclude the pair from direct long range
976 >   * interactions.  Some indirect interactions (notably reaction
977 >   * field) must still be handled for these pairs.
978 >   */
979 >  bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) {
980 >
981 >    // excludesForAtom was constructed to use row/column indices in the MPI
982 >    // version, and to use local IDs in the non-MPI version:
983 >    
984 >    for (vector<int>::iterator i = excludesForAtom[atom1].begin();
985 >         i != excludesForAtom[atom1].end(); ++i) {
986 >      if ( (*i) == atom2 ) return true;
987      }
600 #endif
988  
989 <    // zero is default for unconnected (i.e. normal) pair interactions
603 <    return 0;
989 >    return false;
990    }
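  // Illustrative sketch (not part of the original source): excluded pairs are
  // not dropped from the pair loop.  fillInteractionData (below) sets
  // idat.excluded from this test, so a pairwise routine can branch on the
  // flag, e.g. (hypothetical):
  //
  //   if (idat.excluded) {
  //     // skip the direct long-range term, but still handle indirect
  //     // contributions (e.g. reaction field) and the excluded potential.
  //   }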
991  
992 +
993    void ForceMatrixDecomposition::addForceToAtomRow(int atom1, Vector3d fg){
994   #ifdef IS_MPI
995      atomRowData.force[atom1] += fg;
# Line 620 | Line 1007 | namespace OpenMD {
1007    }
1008  
1009      // filling interaction blocks with pointers
1010 <  InteractionData ForceMatrixDecomposition::fillInteractionData(int atom1, int atom2) {    
1011 <    InteractionData idat;
1010 >  void ForceMatrixDecomposition::fillInteractionData(InteractionData &idat,
1011 >                                                     int atom1, int atom2,
1012 >                                                     bool newAtom1) {
1013  
1014 +    idat.excluded = excludeAtomPair(atom1, atom2);
1015 +
1016 +    if (newAtom1) {
1017 +      
1018   #ifdef IS_MPI
1019 <    
1020 <    idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]),
1021 <                             ff_->getAtomType(identsCol[atom2]) );
1019 >      idat.atid1 = identsRow[atom1];
1020 >      idat.atid2 = identsCol[atom2];
1021 >      
1022 >      if (regionsRow[atom1] >= 0 && regionsCol[atom2] >= 0) {
1023 >        idat.sameRegion = (regionsRow[atom1] == regionsCol[atom2]);
1024 >      } else {
1025 >        idat.sameRegion = false;
1026 >      }
1027 >      
1028 >      if (storageLayout_ & DataStorage::dslAmat) {
1029 >        idat.A1 = &(atomRowData.aMat[atom1]);
1030 >        idat.A2 = &(atomColData.aMat[atom2]);
1031 >      }
1032 >      
1033 >      if (storageLayout_ & DataStorage::dslTorque) {
1034 >        idat.t1 = &(atomRowData.torque[atom1]);
1035 >        idat.t2 = &(atomColData.torque[atom2]);
1036 >      }
1037 >      
1038 >      if (storageLayout_ & DataStorage::dslDipole) {
1039 >        idat.dipole1 = &(atomRowData.dipole[atom1]);
1040 >        idat.dipole2 = &(atomColData.dipole[atom2]);
1041 >      }
1042 >      
1043 >      if (storageLayout_ & DataStorage::dslQuadrupole) {
1044 >        idat.quadrupole1 = &(atomRowData.quadrupole[atom1]);
1045 >        idat.quadrupole2 = &(atomColData.quadrupole[atom2]);
1046 >      }
1047 >      
1048 >      if (storageLayout_ & DataStorage::dslDensity) {
1049 >        idat.rho1 = &(atomRowData.density[atom1]);
1050 >        idat.rho2 = &(atomColData.density[atom2]);
1051 >      }
1052 >      
1053 >      if (storageLayout_ & DataStorage::dslFunctional) {
1054 >        idat.frho1 = &(atomRowData.functional[atom1]);
1055 >        idat.frho2 = &(atomColData.functional[atom2]);
1056 >      }
1057 >      
1058 >      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1059 >        idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
1060 >        idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
1061 >      }
1062 >      
1063 >      if (storageLayout_ & DataStorage::dslParticlePot) {
1064 >        idat.particlePot1 = &(atomRowData.particlePot[atom1]);
1065 >        idat.particlePot2 = &(atomColData.particlePot[atom2]);
1066 >      }
1067 >      
1068 >      if (storageLayout_ & DataStorage::dslSkippedCharge) {              
1069 >        idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
1070 >        idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
1071 >      }
1072 >      
1073 >      if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1074 >        idat.flucQ1 = &(atomRowData.flucQPos[atom1]);
1075 >        idat.flucQ2 = &(atomColData.flucQPos[atom2]);
1076 >      }
1077 >      
1078 > #else
1079 >      
1080 >      idat.atid1 = idents[atom1];
1081 >      idat.atid2 = idents[atom2];
1082 >      
1083 >      if (regions[atom1] >= 0 && regions[atom2] >= 0) {
1084 >        idat.sameRegion = (regions[atom1] == regions[atom2]);
1085 >      } else {
1086 >        idat.sameRegion = false;
1087 >      }
1088 >      
1089 >      if (storageLayout_ & DataStorage::dslAmat) {
1090 >        idat.A1 = &(snap_->atomData.aMat[atom1]);
1091 >        idat.A2 = &(snap_->atomData.aMat[atom2]);
1092 >      }
1093 >      
1094 >      if (storageLayout_ & DataStorage::dslTorque) {
1095 >        idat.t1 = &(snap_->atomData.torque[atom1]);
1096 >        idat.t2 = &(snap_->atomData.torque[atom2]);
1097 >      }
1098 >      
1099 >      if (storageLayout_ & DataStorage::dslDipole) {
1100 >        idat.dipole1 = &(snap_->atomData.dipole[atom1]);
1101 >        idat.dipole2 = &(snap_->atomData.dipole[atom2]);
1102 >      }
1103 >      
1104 >      if (storageLayout_ & DataStorage::dslQuadrupole) {
1105 >        idat.quadrupole1 = &(snap_->atomData.quadrupole[atom1]);
1106 >        idat.quadrupole2 = &(snap_->atomData.quadrupole[atom2]);
1107 >      }
1108 >      
1109 >      if (storageLayout_ & DataStorage::dslDensity) {    
1110 >        idat.rho1 = &(snap_->atomData.density[atom1]);
1111 >        idat.rho2 = &(snap_->atomData.density[atom2]);
1112 >      }
1113 >      
1114 >      if (storageLayout_ & DataStorage::dslFunctional) {
1115 >        idat.frho1 = &(snap_->atomData.functional[atom1]);
1116 >        idat.frho2 = &(snap_->atomData.functional[atom2]);
1117 >      }
1118 >      
1119 >      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1120 >        idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
1121 >        idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
1122 >      }
1123 >      
1124 >      if (storageLayout_ & DataStorage::dslParticlePot) {
1125 >        idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
1126 >        idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
1127 >      }
1128 >      
1129 >      if (storageLayout_ & DataStorage::dslSkippedCharge) {
1130 >        idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
1131 >        idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
1132 >      }
1133 >      
1134 >      if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1135 >        idat.flucQ1 = &(snap_->atomData.flucQPos[atom1]);
1136 >        idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]);
1137 >      }
1138 > #endif
1139 >      
1140 >    } else {
1141 >      // atom1 is not new, so don't bother updating properties of that atom:
1142 > #ifdef IS_MPI
1143 >    idat.atid2 = identsCol[atom2];
1144  
1145 <    
1145 >    if (regionsRow[atom1] >= 0 && regionsCol[atom2] >= 0) {
1146 >      idat.sameRegion = (regionsRow[atom1] == regionsCol[atom2]);
1147 >    } else {
1148 >      idat.sameRegion = false;
1149 >    }
1150 >
1151      if (storageLayout_ & DataStorage::dslAmat) {
633      idat.A1 = &(atomRowData.aMat[atom1]);
1152        idat.A2 = &(atomColData.aMat[atom2]);
1153      }
1154      
637    if (storageLayout_ & DataStorage::dslElectroFrame) {
638      idat.eFrame1 = &(atomRowData.electroFrame[atom1]);
639      idat.eFrame2 = &(atomColData.electroFrame[atom2]);
640    }
641
1155      if (storageLayout_ & DataStorage::dslTorque) {
643      idat.t1 = &(atomRowData.torque[atom1]);
1156        idat.t2 = &(atomColData.torque[atom2]);
1157      }
1158  
1159 +    if (storageLayout_ & DataStorage::dslDipole) {
1160 +      idat.dipole2 = &(atomColData.dipole[atom2]);
1161 +    }
1162 +
1163 +    if (storageLayout_ & DataStorage::dslQuadrupole) {
1164 +      idat.quadrupole2 = &(atomColData.quadrupole[atom2]);
1165 +    }
1166 +
1167      if (storageLayout_ & DataStorage::dslDensity) {
648      idat.rho1 = &(atomRowData.density[atom1]);
1168        idat.rho2 = &(atomColData.density[atom2]);
1169      }
1170  
1171      if (storageLayout_ & DataStorage::dslFunctional) {
653      idat.frho1 = &(atomRowData.functional[atom1]);
1172        idat.frho2 = &(atomColData.functional[atom2]);
1173      }
1174  
1175      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
658      idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
1176        idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
1177      }
1178  
1179      if (storageLayout_ & DataStorage::dslParticlePot) {
663      idat.particlePot1 = &(atomRowData.particlePot[atom1]);
1180        idat.particlePot2 = &(atomColData.particlePot[atom2]);
1181      }
1182  
1183 < #else
1183 >    if (storageLayout_ & DataStorage::dslSkippedCharge) {              
1184 >      idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
1185 >    }
1186  
1187 <    idat.atypes = make_pair( ff_->getAtomType(identsLocal[atom1]),
1188 <                             ff_->getAtomType(identsLocal[atom2]) );
1187 >    if (storageLayout_ & DataStorage::dslFlucQPosition) {
1188 >      idat.flucQ2 = &(atomColData.flucQPos[atom2]);
1189 >    }
1190  
1191 <    if (storageLayout_ & DataStorage::dslAmat) {
1192 <      idat.A1 = &(snap_->atomData.aMat[atom1]);
1193 <      idat.A2 = &(snap_->atomData.aMat[atom2]);
1191 > #else  
1192 >    idat.atid2 = idents[atom2];
1193 >
1194 >    if (regions[atom1] >= 0 && regions[atom2] >= 0) {
1195 >      idat.sameRegion = (regions[atom1] == regions[atom2]);
1196 >    } else {
1197 >      idat.sameRegion = false;
1198      }
1199  
1200 <    if (storageLayout_ & DataStorage::dslElectroFrame) {
1201 <      idat.eFrame1 = &(snap_->atomData.electroFrame[atom1]);
679 <      idat.eFrame2 = &(snap_->atomData.electroFrame[atom2]);
1200 >    if (storageLayout_ & DataStorage::dslAmat) {
1201 >      idat.A2 = &(snap_->atomData.aMat[atom2]);
1202      }
1203  
1204      if (storageLayout_ & DataStorage::dslTorque) {
683      idat.t1 = &(snap_->atomData.torque[atom1]);
1205        idat.t2 = &(snap_->atomData.torque[atom2]);
1206      }
1207  
1208 <    if (storageLayout_ & DataStorage::dslDensity) {
1209 <      idat.rho1 = &(snap_->atomData.density[atom1]);
1208 >    if (storageLayout_ & DataStorage::dslDipole) {
1209 >      idat.dipole2 = &(snap_->atomData.dipole[atom2]);
1210 >    }
1211 >
1212 >    if (storageLayout_ & DataStorage::dslQuadrupole) {
1213 >      idat.quadrupole2 = &(snap_->atomData.quadrupole[atom2]);
1214 >    }
1215 >
1216 >    if (storageLayout_ & DataStorage::dslDensity) {    
1217        idat.rho2 = &(snap_->atomData.density[atom2]);
1218      }
1219  
1220      if (storageLayout_ & DataStorage::dslFunctional) {
693      idat.frho1 = &(snap_->atomData.functional[atom1]);
1221        idat.frho2 = &(snap_->atomData.functional[atom2]);
1222      }
1223  
1224      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
698      idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
1225        idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
1226      }
1227  
1228      if (storageLayout_ & DataStorage::dslParticlePot) {
703      idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
1229        idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
1230      }
1231  
1232 +    if (storageLayout_ & DataStorage::dslSkippedCharge) {
1233 +      idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
1234 +    }
1235 +
1236 +    if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1237 +      idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]);
1238 +    }
1239 +
1240   #endif
1241 <    return idat;
1241 >    }
1242    }
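  // Usage sketch (illustrative, not part of the original source): the
  // newAtom1 flag lets a caller that loops over all column neighbors of a
  // fixed row atom fill the atom1-side pointers only once.  A hypothetical
  // pair loop:
  //
  //   for (size_t k = 0; k < neighbors.size(); ++k) {
  //     fillInteractionData(idat, atom1, neighbors[k], k == 0);
  //     // ... evaluate the interaction, then unpackInteractionData(idat, atom1, neighbors[k]);
  //   }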
710
1243    
1244 <  void ForceMatrixDecomposition::unpackInteractionData(InteractionData idat, int atom1, int atom2) {    
1244 >  void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat,
1245 >                                                       int atom1, int atom2) {  
1246   #ifdef IS_MPI
1247 <    pot_row[atom1] += 0.5 *  *(idat.pot);
1248 <    pot_col[atom2] += 0.5 *  *(idat.pot);
1247 >    pot_row[atom1] += RealType(0.5) *  *(idat.pot);
1248 >    pot_col[atom2] += RealType(0.5) *  *(idat.pot);
1249 >    expot_row[atom1] += RealType(0.5) *  *(idat.excludedPot);
1250 >    expot_col[atom2] += RealType(0.5) *  *(idat.excludedPot);
1251  
1252      atomRowData.force[atom1] += *(idat.f1);
1253      atomColData.force[atom2] -= *(idat.f1);
719 #else
720    longRangePot_ += *(idat.pot);
721    
722    snap_->atomData.force[atom1] += *(idat.f1);
723    snap_->atomData.force[atom2] -= *(idat.f1);
724 #endif
1254  
1255 <  }
1255 >    if (storageLayout_ & DataStorage::dslFlucQForce) {              
1256 >      atomRowData.flucQFrc[atom1] -= *(idat.dVdFQ1);
1257 >      atomColData.flucQFrc[atom2] -= *(idat.dVdFQ2);
1258 >    }
1259  
1260 +    if (storageLayout_ & DataStorage::dslElectricField) {              
1261 +      atomRowData.electricField[atom1] += *(idat.eField1);
1262 +      atomColData.electricField[atom2] += *(idat.eField2);
1263 +    }
1264  
1265 <  InteractionData ForceMatrixDecomposition::fillSkipData(int atom1, int atom2){
1265 >    if (storageLayout_ & DataStorage::dslSitePotential) {              
1266 >      atomRowData.sitePotential[atom1] += *(idat.sPot1);
1267 >      atomColData.sitePotential[atom2] += *(idat.sPot2);
1268 >    }
1269  
1270 <    InteractionData idat;
1271 < #ifdef IS_MPI
1272 <    idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]),
734 <                             ff_->getAtomType(identsCol[atom2]) );
1270 > #else
1271 >    pairwisePot += *(idat.pot);
1272 >    excludedPot += *(idat.excludedPot);
1273  
1274 <    if (storageLayout_ & DataStorage::dslElectroFrame) {
1275 <      idat.eFrame1 = &(atomRowData.electroFrame[atom1]);
1276 <      idat.eFrame2 = &(atomColData.electroFrame[atom2]);
1274 >    snap_->atomData.force[atom1] += *(idat.f1);
1275 >    snap_->atomData.force[atom2] -= *(idat.f1);
1276 >
1277 >    if (idat.doParticlePot) {
1278 >      // This is the pairwise contribution to the particle pot.  The
1279 >      // embedding contribution is added in each of the low level
1280 >      // non-bonded routines.  In parallel, this calculation is done
1281 >      // in collectData, not in unpackInteractionData.
1282 >      snap_->atomData.particlePot[atom1] += *(idat.vpair) * *(idat.sw);
1283 >      snap_->atomData.particlePot[atom2] += *(idat.vpair) * *(idat.sw);
1284      }
1285 <    if (storageLayout_ & DataStorage::dslTorque) {
1286 <      idat.t1 = &(atomRowData.torque[atom1]);
1287 <      idat.t2 = &(atomColData.torque[atom2]);
1285 >    
1286 >    if (storageLayout_ & DataStorage::dslFlucQForce) {              
1287 >      snap_->atomData.flucQFrc[atom1] -= *(idat.dVdFQ1);
1288 >      snap_->atomData.flucQFrc[atom2] -= *(idat.dVdFQ2);
1289      }
744 #else
745    idat.atypes = make_pair( ff_->getAtomType(identsLocal[atom1]),
746                             ff_->getAtomType(identsLocal[atom2]) );
1290  
1291 <    if (storageLayout_ & DataStorage::dslElectroFrame) {
1292 <      idat.eFrame1 = &(snap_->atomData.electroFrame[atom1]);
1293 <      idat.eFrame2 = &(snap_->atomData.electroFrame[atom2]);
1291 >    if (storageLayout_ & DataStorage::dslElectricField) {              
1292 >      snap_->atomData.electricField[atom1] += *(idat.eField1);
1293 >      snap_->atomData.electricField[atom2] += *(idat.eField2);
1294      }
1295 <    if (storageLayout_ & DataStorage::dslTorque) {
1296 <      idat.t1 = &(snap_->atomData.torque[atom1]);
1297 <      idat.t2 = &(snap_->atomData.torque[atom2]);
1295 >
1296 >    if (storageLayout_ & DataStorage::dslSitePotential) {              
1297 >      snap_->atomData.sitePotential[atom1] += *(idat.sPot1);
1298 >      snap_->atomData.sitePotential[atom2] += *(idat.sPot2);
1299      }
1300 < #endif    
1300 >
1301 > #endif
1302 >    
1303    }
1304  
1305    /*
1306     * buildNeighborList
1307     *
1308 <   * first element of pair is row-indexed CutoffGroup
1309 <   * second element of pair is column-indexed CutoffGroup
1308 >   * Constructs the Verlet neighbor list for a force-matrix
1309 >   * decomposition.  In this case, each processor is responsible for
1310 >   * row-site interactions with column-sites.
1311 >   *
1312 >   * neighborList is returned as a packed array of neighboring
1313 >   * column-ordered CutoffGroups.  The starting position in
1314 >   * neighborList for each row-ordered CutoffGroup is given by the
1315 >   * returned vector point.
1316     */
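As a rough illustration of the packed layout described above (a sketch only, not part of either revision; nGroupsInRow stands in for the row-group count, which is nGroups_ in the serial build), the column-ordered neighbors of row-ordered group i occupy the half-open range neighborList[point[i]] .. neighborList[point[i+1]-1]:

   // Sketch: traversing the packed neighbor list.
   for (int i = 0; i < nGroupsInRow; ++i) {
     for (int k = point[i]; k < point[i + 1]; ++k) {
       int j = neighborList[k];   // column-ordered CutoffGroup index
       // ... evaluate the (i, j) pair here ...
     }
   }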
1317 <  vector<pair<int, int> > ForceMatrixDecomposition::buildNeighborList() {
1318 <      
1319 <    vector<pair<int, int> > neighborList;
1317 >  void ForceMatrixDecomposition::buildNeighborList(vector<int>& neighborList,
1318 >                                                   vector<int>& point) {
1319 >    neighborList.clear();
1320 >    point.clear();
1321 >    int len = 0;
1322 >    
1323 >    bool doAllPairs = false;
1324 >
1325 >    Snapshot* snap_ = sman_->getCurrentSnapshot();
1326 >    Mat3x3d box;
1327 >    Mat3x3d invBox;
1328 >
1329 >    Vector3d rs, scaled, dr;
1330 >    Vector3i whichCell;
1331 >    int cellIndex;
1332 >
1333   #ifdef IS_MPI
1334      cellListRow_.clear();
1335      cellListCol_.clear();
1336 +    point.resize(nGroupsInRow_+1);
1337   #else
1338      cellList_.clear();
1339 +    point.resize(nGroups_+1);
1340   #endif
1341 +    
1342 +    if (!usePeriodicBoundaryConditions_) {
1343 +      box = snap_->getBoundingBox();
1344 +      invBox = snap_->getInvBoundingBox();
1345 +    } else {
1346 +      box = snap_->getHmat();
1347 +      invBox = snap_->getInvHmat();
1348 +    }
1349 +    
1350 +    Vector3d A = box.getColumn(0);
1351 +    Vector3d B = box.getColumn(1);
1352 +    Vector3d C = box.getColumn(2);
1353  
1354 <    // dangerous to not do error checking.
1355 <    RealType rCut_;
1356 <
1357 <    RealType rList_ = (rCut_ + skinThickness_);
779 <    RealType rl2 = rList_ * rList_;
780 <    Snapshot* snap_ = sman_->getCurrentSnapshot();
781 <    Mat3x3d Hmat = snap_->getHmat();
782 <    Vector3d Hx = Hmat.getColumn(0);
783 <    Vector3d Hy = Hmat.getColumn(1);
784 <    Vector3d Hz = Hmat.getColumn(2);
1354 >    // Required for triclinic cells
1355 >    Vector3d AxB = cross(A, B);
1356 >    Vector3d BxC = cross(B, C);
1357 >    Vector3d CxA = cross(C, A);
1358  
1359 <    nCells_.x() = (int) ( Hx.length() )/ rList_;
1360 <    nCells_.y() = (int) ( Hy.length() )/ rList_;
1361 <    nCells_.z() = (int) ( Hz.length() )/ rList_;
1359 >    // unit vectors perpendicular to the faces of the triclinic cell:
1360 >    AxB.normalize();
1361 >    BxC.normalize();
1362 >    CxA.normalize();
1363  
1364 <    Mat3x3d invHmat = snap_->getInvHmat();
1365 <    Vector3d rs, scaled, dr;
1366 <    Vector3i whichCell;
1367 <    int cellIndex;
1368 <
1364 >    // A set of perpendicular lengths in triclinic cells:
1365 >    RealType Wa = abs(dot(A, BxC));
1366 >    RealType Wb = abs(dot(B, CxA));
1367 >    RealType Wc = abs(dot(C, AxB));
1368 >    
1369 >    nCells_.x() = int( Wa / rList_ );
1370 >    nCells_.y() = int( Wb / rList_ );
1371 >    nCells_.z() = int( Wc / rList_ );
1372 >    
1373 >    // handle small boxes where the cell offsets can end up repeating cells
1374 >    if (nCells_.x() < 3) doAllPairs = true;
1375 >    if (nCells_.y() < 3) doAllPairs = true;
1376 >    if (nCells_.z() < 3) doAllPairs = true;
1377 >    
1378 >    int nCtot = nCells_.x() * nCells_.y() * nCells_.z();
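A quick worked example of the perpendicular widths used above (illustrative numbers only, not taken from the code): for a sheared cell with

   A = (30, 0, 0),  B = (0, 40, 0),  C = (10, 0, 50)
   BxC = (2000, 0, -400)   ->  unit normal ~ (0.981, 0, -0.196)
   Wa = |A . n| ~ 29.4,    Wb = 40,    Wc = 50
   with rList_ = 10:   nCells_ = (2, 4, 5)

Dividing the perpendicular widths (rather than the raw edge lengths) by rList_ guarantees that every cell is at least rList_ thick even when the box is tilted; the doAllPairs fallback above covers boxes where fewer than three cells fit along some direction, since the 27 cell offsets would then revisit (and double-count) the same cells.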
1379 >    
1380   #ifdef IS_MPI
1381 <    for (int i = 0; i < nGroupsInRow_; i++) {
1382 <      rs = cgRowData.position[i];
1383 <      // scaled positions relative to the box vectors
1384 <      scaled = invHmat * rs;
1385 <      // wrap the vector back into the unit box by subtracting integer box
1386 <      // numbers
1387 <      for (int j = 0; j < 3; j++)
1388 <        scaled[j] -= roundMe(scaled[j]);
1389 <    
1390 <      // find xyz-indices of cell that cutoffGroup is in.
1391 <      whichCell.x() = nCells_.x() * scaled.x();
1392 <      whichCell.y() = nCells_.y() * scaled.y();
1393 <      whichCell.z() = nCells_.z() * scaled.z();
1381 >    cellListRow_.resize(nCtot);
1382 >    cellListCol_.resize(nCtot);
1383 > #else
1384 >    cellList_.resize(nCtot);
1385 > #endif
1386 >    
1387 >    if (!doAllPairs) {
1388 >      
1389 > #ifdef IS_MPI
1390 >      
1391 >      for (int i = 0; i < nGroupsInRow_; i++) {
1392 >        rs = cgRowData.position[i];
1393 >        
1394 >        // scaled positions relative to the box vectors
1395 >        scaled = invBox * rs;
1396 >        
1397 >        // wrap the vector back into the unit box by subtracting integer box
1398 >        // numbers
1399 >        for (int j = 0; j < 3; j++) {
1400 >          scaled[j] -= roundMe(scaled[j]);
1401 >          scaled[j] += 0.5;
1402 >          // Handle the special case when an object is exactly on the
1403 >          // boundary (a scaled coordinate of 1.0 is the same as
1404 >          // a scaled coordinate of 0.0)
1405 >          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1406 >        }
1407 >        
1408 >        // find xyz-indices of cell that cutoffGroup is in.
1409 >        whichCell.x() = nCells_.x() * scaled.x();
1410 >        whichCell.y() = nCells_.y() * scaled.y();
1411 >        whichCell.z() = nCells_.z() * scaled.z();
1412 >        
1413 >        // find single index of this cell:
1414 >        cellIndex = Vlinear(whichCell, nCells_);
1415 >        
1416 >        // add this cutoff group to the list of groups in this cell;
1417 >        cellListRow_[cellIndex].push_back(i);
1418 >      }
1419 >      for (int i = 0; i < nGroupsInCol_; i++) {
1420 >        rs = cgColData.position[i];
1421 >        
1422 >        // scaled positions relative to the box vectors
1423 >        scaled = invBox * rs;
1424 >        
1425 >        // wrap the vector back into the unit box by subtracting integer box
1426 >        // numbers
1427 >        for (int j = 0; j < 3; j++) {
1428 >          scaled[j] -= roundMe(scaled[j]);
1429 >          scaled[j] += 0.5;
1430 >          // Handle the special case when an object is exactly on the
1431 >          // boundary (a scaled coordinate of 1.0 is the same as
1432 >          // a scaled coordinate of 0.0)
1433 >          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1434 >        }
1435 >        
1436 >        // find xyz-indices of cell that cutoffGroup is in.
1437 >        whichCell.x() = nCells_.x() * scaled.x();
1438 >        whichCell.y() = nCells_.y() * scaled.y();
1439 >        whichCell.z() = nCells_.z() * scaled.z();
1440 >        
1441 >        // find single index of this cell:
1442 >        cellIndex = Vlinear(whichCell, nCells_);
1443 >        
1444 >        // add this cutoff group to the list of groups in this cell;
1445 >        cellListCol_[cellIndex].push_back(i);
1446 >      }
1447 >            
1448 > #else
1449 >      for (int i = 0; i < nGroups_; i++) {
1450 >        rs = snap_->cgData.position[i];
1451 >        
1452 >        // scaled positions relative to the box vectors
1453 >        scaled = invBox * rs;
1454 >        
1455 >        // wrap the vector back into the unit box by subtracting integer box
1456 >        // numbers
1457 >        for (int j = 0; j < 3; j++) {
1458 >          scaled[j] -= roundMe(scaled[j]);
1459 >          scaled[j] += 0.5;
1460 >          // Handle the special case when an object is exactly on the
1461 >          // boundary (a scaled coordinate of 1.0 is the same as
1462 >          // a scaled coordinate of 0.0)
1463 >          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1464 >        }
1465 >        
1466 >        // find xyz-indices of cell that cutoffGroup is in.
1467 >        whichCell.x() = int(nCells_.x() * scaled.x());
1468 >        whichCell.y() = int(nCells_.y() * scaled.y());
1469 >        whichCell.z() = int(nCells_.z() * scaled.z());
1470 >        
1471 >        // find single index of this cell:
1472 >        cellIndex = Vlinear(whichCell, nCells_);
1473 >        
1474 >        // add this cutoff group to the list of groups in this cell;
1475 >        cellList_[cellIndex].push_back(i);
1476 >      }
1477  
1478 <      // find single index of this cell:
811 <      cellIndex = Vlinear(whichCell, nCells_);
812 <      // add this cutoff group to the list of groups in this cell;
813 <      cellListRow_[cellIndex].push_back(i);
814 <    }
1478 > #endif
1479  
1480 <    for (int i = 0; i < nGroupsInCol_; i++) {
1481 <      rs = cgColData.position[i];
1482 <      // scaled positions relative to the box vectors
819 <      scaled = invHmat * rs;
820 <      // wrap the vector back into the unit box by subtracting integer box
821 <      // numbers
822 <      for (int j = 0; j < 3; j++)
823 <        scaled[j] -= roundMe(scaled[j]);
824 <
825 <      // find xyz-indices of cell that cutoffGroup is in.
826 <      whichCell.x() = nCells_.x() * scaled.x();
827 <      whichCell.y() = nCells_.y() * scaled.y();
828 <      whichCell.z() = nCells_.z() * scaled.z();
829 <
830 <      // find single index of this cell:
831 <      cellIndex = Vlinear(whichCell, nCells_);
832 <      // add this cutoff group to the list of groups in this cell;
833 <      cellListCol_[cellIndex].push_back(i);
834 <    }
1480 > #ifdef IS_MPI
1481 >      for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1482 >        rs = cgRowData.position[j1];
1483   #else
836    for (int i = 0; i < nGroups_; i++) {
837      rs = snap_->cgData.position[i];
838      // scaled positions relative to the box vectors
839      scaled = invHmat * rs;
840      // wrap the vector back into the unit box by subtracting integer box
841      // numbers
842      for (int j = 0; j < 3; j++)
843        scaled[j] -= roundMe(scaled[j]);
1484  
1485 <      // find xyz-indices of cell that cutoffGroup is in.
1486 <      whichCell.x() = nCells_.x() * scaled.x();
847 <      whichCell.y() = nCells_.y() * scaled.y();
848 <      whichCell.z() = nCells_.z() * scaled.z();
849 <
850 <      // find single index of this cell:
851 <      cellIndex = Vlinear(whichCell, nCells_);
852 <      // add this cutoff group to the list of groups in this cell;
853 <      cellList_[cellIndex].push_back(i);
854 <    }
1485 >      for (int j1 = 0; j1 < nGroups_; j1++) {
1486 >        rs = snap_->cgData.position[j1];
1487   #endif
1488 +        point[j1] = len;
1489 +        
1490 +        // scaled positions relative to the box vectors
1491 +        scaled = invBox * rs;
1492 +        
1493 +        // wrap the vector back into the unit box by subtracting integer box
1494 +        // numbers
1495 +        for (int j = 0; j < 3; j++) {
1496 +          scaled[j] -= roundMe(scaled[j]);
1497 +          scaled[j] += 0.5;
1498 +          // Handle the special case when an object is exactly on the
1499 +          // boundary (a scaled coordinate of 1.0 is the same as
1500 +          // a scaled coordinate of 0.0)
1501 +          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1502 +        }
1503 +        
1504 +        // find xyz-indices of cell that cutoffGroup is in.
1505 +        whichCell.x() = nCells_.x() * scaled.x();
1506 +        whichCell.y() = nCells_.y() * scaled.y();
1507 +        whichCell.z() = nCells_.z() * scaled.z();
1508 +        
1509 +        // find single index of this cell:
1510 +        int m1 = Vlinear(whichCell, nCells_);
1511  
1512 <    for (int m1z = 0; m1z < nCells_.z(); m1z++) {
1513 <      for (int m1y = 0; m1y < nCells_.y(); m1y++) {
1514 <        for (int m1x = 0; m1x < nCells_.x(); m1x++) {
1515 <          Vector3i m1v(m1x, m1y, m1z);
861 <          int m1 = Vlinear(m1v, nCells_);
1512 >        for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1513 >             os != cellOffsets_.end(); ++os) {
1514 >              
1515 >          Vector3i m2v = whichCell + (*os);
1516  
1517 <          for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1518 <               os != cellOffsets_.end(); ++os) {
1517 >          if (m2v.x() >= nCells_.x()) {
1518 >            m2v.x() = 0;          
1519 >          } else if (m2v.x() < 0) {
1520 >            m2v.x() = nCells_.x() - 1;
1521 >          }
1522 >          
1523 >          if (m2v.y() >= nCells_.y()) {
1524 >            m2v.y() = 0;          
1525 >          } else if (m2v.y() < 0) {
1526 >            m2v.y() = nCells_.y() - 1;
1527 >          }
1528 >          
1529 >          if (m2v.z() >= nCells_.z()) {
1530 >            m2v.z() = 0;          
1531 >          } else if (m2v.z() < 0) {
1532 >            m2v.z() = nCells_.z() - 1;
1533 >          }
1534 >          int m2 = Vlinear (m2v, nCells_);                                      
1535 > #ifdef IS_MPI
1536 >          for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1537 >               j2 != cellListCol_[m2].end(); ++j2) {
1538              
1539 <            Vector3i m2v = m1v + (*os);
1540 <            
1541 <            if (m2v.x() >= nCells_.x()) {
1542 <              m2v.x() = 0;          
1543 <            } else if (m2v.x() < 0) {
1544 <              m2v.x() = nCells_.x() - 1;
1539 >            // In parallel, we need to visit *all* pairs of row
1540 >            // & column indices and will divide labor in the
1541 >            // force evaluation later.
1542 >            dr = cgColData.position[(*j2)] - rs;
1543 >            if (usePeriodicBoundaryConditions_) {
1544 >              snap_->wrapVector(dr);
1545              }
1546 +            if (dr.lengthSquare() < rListSq_) {
1547 +              neighborList.push_back( (*j2) );
1548 +              ++len;
1549 +            }                
1550 +          }        
1551 + #else
1552 +          for (vector<int>::iterator j2 = cellList_[m2].begin();
1553 +               j2 != cellList_[m2].end(); ++j2) {
1554 +          
1555 +            // Always do this if we're in different cells or if
1556 +            // we're in the same cell and the global index of
1557 +            // the j2 cutoff group is greater than or equal to
1558 +            // the j1 cutoff group.  Note that Rappaport's code
1559 +            // has a "less than" conditional here, but that
1560 +            // deals with atom-by-atom computation.  OpenMD
1561 +            // allows atoms within a single cutoff group to
1562 +            // interact with each other.
1563              
1564 <            if (m2v.y() >= nCells_.y()) {
1565 <              m2v.y() = 0;          
1566 <            } else if (m2v.y() < 0) {
1567 <              m2v.y() = nCells_.y() - 1;
1568 <            }
879 <            
880 <            if (m2v.z() >= nCells_.z()) {
881 <              m2v.z() = 0;          
882 <            } else if (m2v.z() < 0) {
883 <              m2v.z() = nCells_.z() - 1;
884 <            }
885 <            
886 <            int m2 = Vlinear (m2v, nCells_);
887 <
888 < #ifdef IS_MPI
889 <            for (vector<int>::iterator j1 = cellListRow_[m1].begin();
890 <                 j1 != cellListRow_[m1].end(); ++j1) {
891 <              for (vector<int>::iterator j2 = cellListCol_[m2].begin();
892 <                   j2 != cellListCol_[m2].end(); ++j2) {
893 <                              
894 <                // Always do this if we're in different cells or if
895 <                // we're in the same cell and the global index of the
896 <                // j2 cutoff group is less than the j1 cutoff group
897 <
898 <                if (m2 != m1 || cgColToGlobal[(*j2)] < cgRowToGlobal[(*j1)]) {
899 <                  dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)];
900 <                  snap_->wrapVector(dr);
901 <                  if (dr.lengthSquare() < rl2) {
902 <                    neighborList.push_back(make_pair((*j1), (*j2)));
903 <                  }
904 <                }
1564 >            if ( (*j2) >= j1 ) {
1565 >              
1566 >              dr = snap_->cgData.position[(*j2)] - rs;
1567 >              if (usePeriodicBoundaryConditions_) {
1568 >                snap_->wrapVector(dr);
1569                }
1570 <            }
1571 < #else
1572 <            for (vector<int>::iterator j1 = cellList_[m1].begin();
909 <                 j1 != cellList_[m1].end(); ++j1) {
910 <              for (vector<int>::iterator j2 = cellList_[m2].begin();
911 <                   j2 != cellList_[m2].end(); ++j2) {
912 <                              
913 <                // Always do this if we're in different cells or if
914 <                // we're in the same cell and the global index of the
915 <                // j2 cutoff group is less than the j1 cutoff group
916 <
917 <                if (m2 != m1 || (*j2) < (*j1)) {
918 <                  dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
919 <                  snap_->wrapVector(dr);
920 <                  if (dr.lengthSquare() < rl2) {
921 <                    neighborList.push_back(make_pair((*j1), (*j2)));
922 <                  }
923 <                }
1570 >              if ( dr.lengthSquare() < rListSq_) {
1571 >                neighborList.push_back( (*j2) );
1572 >                ++len;
1573                }
1574              }
1575 +          }                
1576   #endif
1577 +        }
1578 +      }      
1579 +    } else {
1580 +      // branch to do all cutoff group pairs
1581 + #ifdef IS_MPI
1582 +      for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1583 +        point[j1] = len;
1584 +        rs = cgRowData.position[j1];
1585 +        for (int j2 = 0; j2 < nGroupsInCol_; j2++) {    
1586 +          dr = cgColData.position[j2] - rs;
1587 +          if (usePeriodicBoundaryConditions_) {
1588 +            snap_->wrapVector(dr);
1589            }
1590 +          if (dr.lengthSquare() < rListSq_) {
1591 +            neighborList.push_back( j2 );
1592 +            ++len;
1593 +          }
1594          }
1595 +      }      
1596 + #else
1597 +      // include all groups here.
1598 +      for (int j1 = 0; j1 < nGroups_; j1++) {
1599 +        point[j1] = len;
1600 +        rs = snap_->cgData.position[j1];
1601 +        // include self group interactions j2 == j1
1602 +        for (int j2 = j1; j2 < nGroups_; j2++) {
1603 +          dr = snap_->cgData.position[j2] - rs;
1604 +          if (usePeriodicBoundaryConditions_) {
1605 +            snap_->wrapVector(dr);
1606 +          }
1607 +          if (dr.lengthSquare() < rListSq_) {
1608 +            neighborList.push_back( j2 );
1609 +            ++len;
1610 +          }
1611 +        }    
1612        }
1613 + #endif
1614      }
1615  
1616 + #ifdef IS_MPI
1617 +    point[nGroupsInRow_] = len;
1618 + #else
1619 +    point[nGroups_] = len;
1620 + #endif
1621 +  
1622      // save the local cutoff group positions for the check that is
1623      // done on each loop:
1624      saved_CG_positions_.clear();
1625 +    saved_CG_positions_.reserve(nGroups_);
1626      for (int i = 0; i < nGroups_; i++)
1627        saved_CG_positions_.push_back(snap_->cgData.position[i]);
937
938    return neighborList;
1628    }
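The saved positions stored at the end of buildNeighborList feed the per-step displacement check mentioned in the comment above. A sketch of the conventional Verlet-list criterion (the standard half-skin test, not necessarily the exact form used elsewhere in OpenMD; skinThickness_ is rList_ - rCut_):

   // Sketch: rebuild the list once any group has drifted more than half
   // the skin since the last build.
   bool needsRebuild = false;
   for (int i = 0; i < nGroups_; i++) {
     Vector3d disp = snap_->cgData.position[i] - saved_CG_positions_[i];
     if (disp.lengthSquare() > 0.25 * skinThickness_ * skinThickness_) {
       needsRebuild = true;
       break;
     }
   }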
1629   } //end namespace OpenMD
