233       }
234     }
235
236  <  #endif
237  <
238  <    // allocate memory for the parallel objects
239  <    atypesLocal.resize(nLocal_);
240  <
241  <    for (int i = 0; i < nLocal_; i++)
242  <      atypesLocal[i] = ff_->getAtomType(idents[i]);
243  <
244  <    groupList_.clear();
245  <    groupList_.resize(nGroups_);
246  <    for (int i = 0; i < nGroups_; i++) {
247  <      int gid = cgLocalToGlobal[i];
248  <      for (int j = 0; j < nLocal_; j++) {
249  <        int aid = AtomLocalToGlobal[j];
250  <        if (globalGroupMembership[aid] == gid) {
251  <          groupList_[i].push_back(j);
252  <        }
253  <      }
254  <    }
255  <
236  >  #else
237        excludesForAtom.clear();
238        excludesForAtom.resize(nLocal_);
239        toposForAtom.clear();
247          for (int j = 0; j < nLocal_; j++) {
248            int jglob = AtomLocalToGlobal[j];
249
250  <        if (excludes->hasPair(iglob, jglob))
250  >        if (excludes->hasPair(iglob, jglob))
251              excludesForAtom[i].push_back(j);
252
253  +
254          if (oneTwo->hasPair(iglob, jglob)) {
255            toposForAtom[i].push_back(j);
256            topoDist[i].push_back(1);
267          }
268        }
269      }
270  <
270  >  #endif
271  >
272  >    // allocate memory for the parallel objects
273  >    atypesLocal.resize(nLocal_);
274  >
275  >    for (int i = 0; i < nLocal_; i++)
276  >      atypesLocal[i] = ff_->getAtomType(idents[i]);
277  >
278  >    groupList_.clear();
279  >    groupList_.resize(nGroups_);
280  >    for (int i = 0; i < nGroups_; i++) {
281  >      int gid = cgLocalToGlobal[i];
282  >      for (int j = 0; j < nLocal_; j++) {
283  >        int aid = AtomLocalToGlobal[j];
284  >        if (globalGroupMembership[aid] == gid) {
285  >          groupList_[i].push_back(j);
286  >        }
287  >      }
288  >    }
289  >
290  >
291      createGtypeCutoffMap();
292
293    }
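Net effect of the hunk above: the per-atom exclusion and topology lists are now built only in the serial (#else) branch, while the atypesLocal and groupList_ setup moves below the #endif so it runs in both the MPI and non-MPI builds. The group-membership scan is O(nGroups_ * nLocal_); a single-pass alternative is sketched below. This is not part of the change, only a hedged illustration that assumes gid values can be inverted through a map built from cgLocalToGlobal.

    // Hypothetical single-pass construction of groupList_ (not in this
    // commit): invert globalGroupMembership through a gid -> local-group
    // map instead of scanning every atom once per group.
    #include <map>
    #include <vector>

    void buildGroupList(const std::vector<int>& cgLocalToGlobal,
                        const std::vector<int>& AtomLocalToGlobal,
                        const std::vector<int>& globalGroupMembership,
                        std::vector<std::vector<int> >& groupList_) {
      int nGroups_ = static_cast<int>(cgLocalToGlobal.size());
      int nLocal_  = static_cast<int>(AtomLocalToGlobal.size());

      // gid -> local group index
      std::map<int, int> globalToLocalGroup;
      for (int i = 0; i < nGroups_; i++)
        globalToLocalGroup[cgLocalToGlobal[i]] = i;

      groupList_.clear();
      groupList_.resize(nGroups_);
      for (int j = 0; j < nLocal_; j++) {
        int gid = globalGroupMembership[AtomLocalToGlobal[j]];
        std::map<int, int>::iterator it = globalToLocalGroup.find(gid);
        if (it != globalToLocalGroup.end())
          groupList_[it->second].push_back(j);   // O(nLocal_ log nGroups_) total
      }
    }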
685      }
686
687        AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp);
688  <     for (int i = 0; i < ns; i++)
688  >     for (int i = 0; i < ns; i++)
689           snap_->atomData.skippedCharge[i] += skch_tmp[i];
690  +
691      }
692
693      nLocal_ = snap_->getNumberOfAtoms();
717        pairwisePot[ii] = ploc2;
718      }
719
720  +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
721  +      RealType ploc1 = embeddingPot[ii];
722  +      RealType ploc2 = 0.0;
723  +      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
724  +      embeddingPot[ii] = ploc2;
725  +    }
726  +
727    #endif
728
729    }
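The loop added at 720-725 gives embeddingPot the same cross-rank treatment that pairwisePot already receives: one MPI::SUM Allreduce per interaction family, so every rank ends up with the global embedding energy. A hedged variant, not part of this change, packs all families into a single collective call; it assumes RealType, MPI::REALTYPE, and N_INTERACTION_FAMILIES as used in the surrounding file (with N_INTERACTION_FAMILIES a compile-time constant).

    // Hypothetical one-call variant of the reduction above (not in this
    // commit): reduce every interaction family at once instead of issuing
    // N_INTERACTION_FAMILIES separate Allreduce calls.
    RealType sendbuf[N_INTERACTION_FAMILIES];
    RealType recvbuf[N_INTERACTION_FAMILIES];
    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++)
      sendbuf[ii] = embeddingPot[ii];
    MPI::COMM_WORLD.Allreduce(sendbuf, recvbuf, N_INTERACTION_FAMILIES,
                              MPI::REALTYPE, MPI::SUM);
    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++)
      embeddingPot[ii] = recvbuf[ii];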
865      * field) must still be handled for these pairs.
866      */
867    bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) {
868  <    int unique_id_2;
869  <  #ifdef IS_MPI
870  <    // in MPI, we have to look up the unique IDs for the row atom.
871  <    unique_id_2 = AtomColToGlobal[atom2];
872  <  #else
873  <    // in the normal loop, the atom numbers are unique
874  <    unique_id_2 = atom2;
875  <  #endif
868  >
869  >    // excludesForAtom was constructed to use row/column indices in the MPI
870  >    // version, and to use local IDs in the non-MPI version:
871
872      for (vector<int>::iterator i = excludesForAtom[atom1].begin();
873           i != excludesForAtom[atom1].end(); ++i) {
874  <      if ( (*i) == unique_id_2 ) return true;
874  >      if ( (*i) == atom2 ) return true;
875      }
876
877      return false;
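The rewritten excludeAtomPair relies on the invariant stated in the new comment: excludesForAtom is built in the same index space its consumers use (column indices under MPI, local indices otherwise), so the unique_id_2 translation is unnecessary and the linear search can compare against atom2 directly. A compact equivalent of the new loop using the standard algorithm, shown only as a hypothetical refactor, not part of this commit:

    // Behaviorally identical std::find form of the loop above.
    #include <algorithm>
    #include <vector>

    bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) {
      const std::vector<int>& ex = excludesForAtom[atom1];
      return std::find(ex.begin(), ex.end(), atom2) != ex.end();
    }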