
Comparing trunk/src/parallel/ForceMatrixDecomposition.cpp (file contents):
Revision 1893 by gezelter, Wed Jun 19 17:19:07 2013 UTC vs.
Revision 2064 by gezelter, Tue Mar 3 17:02:20 2015 UTC

# Line 50 | Line 50 | namespace OpenMD {
50  
51    ForceMatrixDecomposition::ForceMatrixDecomposition(SimInfo* info, InteractionManager* iMan) : ForceDecomposition(info, iMan) {
52  
53 <    // In a parallel computation, row and column scans must visit all
54 <    // surrounding cells (not just the 14 upper triangular blocks that
55 <    // are used when the processor can see all pairs)
56 < #ifdef IS_MPI
53 >    // Row and column scans must visit all surrounding cells
54      cellOffsets_.clear();
55      cellOffsets_.push_back( Vector3i(-1,-1,-1) );
56      cellOffsets_.push_back( Vector3i( 0,-1,-1) );
# Line 82 | Line 79 | namespace OpenMD {
79      cellOffsets_.push_back( Vector3i(-1, 1, 1) );
80      cellOffsets_.push_back( Vector3i( 0, 1, 1) );
81      cellOffsets_.push_back( Vector3i( 1, 1, 1) );
85 #endif    
82    }
83  
84  
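Revision 2064 drops the serial/parallel split here, so the cell scan always uses the full 3x3x3 stencil of neighboring cells. Assuming the explicit list above enumerates all 27 offsets (including the central cell), an equivalent triple-loop construction would be:

    cellOffsets_.clear();
    for (int k = -1; k <= 1; k++)
      for (int j = -1; j <= 1; j++)
        for (int i = -1; i <= 1; i++)
          cellOffsets_.push_back( Vector3i(i, j, k) );   // 27 offsets, central cell included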
# Line 99 | Line 95 | namespace OpenMD {
95      nGroups_ = info_->getNLocalCutoffGroups();
96      // gather the information for atomtype IDs (atids):
97      idents = info_->getIdentArray();
98 +    regions = info_->getRegions();
99      AtomLocalToGlobal = info_->getGlobalAtomIndices();
100      cgLocalToGlobal = info_->getGlobalGroupIndices();
101      vector<int> globalGroupMembership = info_->getGlobalGroupMembership();
# Line 118 | Line 115 | namespace OpenMD {
115      
116   #ifdef IS_MPI
117  
118 <    MPI::Intracomm row = rowComm.getComm();
119 <    MPI::Intracomm col = colComm.getComm();
118 >    MPI_Comm row = rowComm.getComm();
119 >    MPI_Comm col = colComm.getComm();
120  
121      AtomPlanIntRow = new Plan<int>(row, nLocal_);
122      AtomPlanRealRow = new Plan<RealType>(row, nLocal_);
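The MPI::Intracomm to MPI_Comm change above is part of a file-wide move from the deprecated MPI C++ bindings (removed in the MPI-3 standard) to the plain C API; the Allreduce calls later in this diff follow the same pattern. A minimal sketch of the translation, using placeholder buffers:

    RealType localVal = 0.0, globalVal = 0.0;

    // revision 1893 (C++ bindings):
    //   MPI::COMM_WORLD.Allreduce(&localVal, &globalVal, 1, MPI::REALTYPE, MPI::SUM);

    // revision 2064 (C bindings):
    MPI_Allreduce(&localVal, &globalVal, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);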
# Line 163 | Line 160 | namespace OpenMD {
160      
161      AtomPlanIntRow->gather(idents, identsRow);
162      AtomPlanIntColumn->gather(idents, identsCol);
163 +
164 +    regionsRow.resize(nAtomsInRow_);
165 +    regionsCol.resize(nAtomsInCol_);
166      
167 +    AtomPlanIntRow->gather(regions, regionsRow);
168 +    AtomPlanIntColumn->gather(regions, regionsCol);
169 +    
170      // allocate memory for the parallel objects
171      atypesRow.resize(nAtomsInRow_);
172      atypesCol.resize(nAtomsInCol_);
# Line 299 | Line 302 | namespace OpenMD {
302            groupList_[i].push_back(j);
303          }
304        }      
305 <    }
303 <
304 <
305 <    createGtypeCutoffMap();
306 <
305 >    }    
306    }
308  
309  void ForceMatrixDecomposition::createGtypeCutoffMap() {
310    
311    RealType tol = 1e-6;
312    largestRcut_ = 0.0;
313    int atid;
314    set<AtomType*> atypes = info_->getSimulatedAtomTypes();
315    
316    map<int, RealType> atypeCutoff;
317      
318    for (set<AtomType*>::iterator at = atypes.begin();
319         at != atypes.end(); ++at){
320      atid = (*at)->getIdent();
321      if (userChoseCutoff_)
322        atypeCutoff[atid] = userCutoff_;
323      else
324        atypeCutoff[atid] = interactionMan_->getSuggestedCutoffRadius(*at);
325    }
326    
327    vector<RealType> gTypeCutoffs;
328    // first we do a single loop over the cutoff groups to find the
329    // largest cutoff for any atypes present in this group.
330 #ifdef IS_MPI
331    vector<RealType> groupCutoffRow(nGroupsInRow_, 0.0);
332    groupRowToGtype.resize(nGroupsInRow_);
333    for (int cg1 = 0; cg1 < nGroupsInRow_; cg1++) {
334      vector<int> atomListRow = getAtomsInGroupRow(cg1);
335      for (vector<int>::iterator ia = atomListRow.begin();
336           ia != atomListRow.end(); ++ia) {            
337        int atom1 = (*ia);
338        atid = identsRow[atom1];
339        if (atypeCutoff[atid] > groupCutoffRow[cg1]) {
340          groupCutoffRow[cg1] = atypeCutoff[atid];
341        }
342      }
343
344      bool gTypeFound = false;
345      for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
346        if (abs(groupCutoffRow[cg1] - gTypeCutoffs[gt]) < tol) {
347          groupRowToGtype[cg1] = gt;
348          gTypeFound = true;
349        }
350      }
351      if (!gTypeFound) {
352        gTypeCutoffs.push_back( groupCutoffRow[cg1] );
353        groupRowToGtype[cg1] = gTypeCutoffs.size() - 1;
354      }
355      
356    }
357    vector<RealType> groupCutoffCol(nGroupsInCol_, 0.0);
358    groupColToGtype.resize(nGroupsInCol_);
359    for (int cg2 = 0; cg2 < nGroupsInCol_; cg2++) {
360      vector<int> atomListCol = getAtomsInGroupColumn(cg2);
361      for (vector<int>::iterator jb = atomListCol.begin();
362           jb != atomListCol.end(); ++jb) {            
363        int atom2 = (*jb);
364        atid = identsCol[atom2];
365        if (atypeCutoff[atid] > groupCutoffCol[cg2]) {
366          groupCutoffCol[cg2] = atypeCutoff[atid];
367        }
368      }
369      bool gTypeFound = false;
370      for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
371        if (abs(groupCutoffCol[cg2] - gTypeCutoffs[gt]) < tol) {
372          groupColToGtype[cg2] = gt;
373          gTypeFound = true;
374        }
375      }
376      if (!gTypeFound) {
377        gTypeCutoffs.push_back( groupCutoffCol[cg2] );
378        groupColToGtype[cg2] = gTypeCutoffs.size() - 1;
379      }
380    }
381 #else
382
383    vector<RealType> groupCutoff(nGroups_, 0.0);
384    groupToGtype.resize(nGroups_);
385    for (int cg1 = 0; cg1 < nGroups_; cg1++) {
386      groupCutoff[cg1] = 0.0;
387      vector<int> atomList = getAtomsInGroupRow(cg1);
388      for (vector<int>::iterator ia = atomList.begin();
389           ia != atomList.end(); ++ia) {            
390        int atom1 = (*ia);
391        atid = idents[atom1];
392        if (atypeCutoff[atid] > groupCutoff[cg1])
393          groupCutoff[cg1] = atypeCutoff[atid];
394      }
395      
396      bool gTypeFound = false;
397      for (unsigned int gt = 0; gt < gTypeCutoffs.size(); gt++) {
398        if (abs(groupCutoff[cg1] - gTypeCutoffs[gt]) < tol) {
399          groupToGtype[cg1] = gt;
400          gTypeFound = true;
401        }
402      }
403      if (!gTypeFound) {      
404        gTypeCutoffs.push_back( groupCutoff[cg1] );
405        groupToGtype[cg1] = gTypeCutoffs.size() - 1;
406      }      
407    }
408 #endif
409
410    // Now we find the maximum group cutoff value present in the simulation
411
412    RealType groupMax = *max_element(gTypeCutoffs.begin(),
413                                     gTypeCutoffs.end());
414
415 #ifdef IS_MPI
416    MPI::COMM_WORLD.Allreduce(&groupMax, &groupMax, 1, MPI::REALTYPE,
417                              MPI::MAX);
418 #endif
307      
420    RealType tradRcut = groupMax;
421
422    for (unsigned int i = 0; i < gTypeCutoffs.size();  i++) {
423      for (unsigned int j = 0; j < gTypeCutoffs.size();  j++) {      
424        RealType thisRcut;
425        switch(cutoffPolicy_) {
426        case TRADITIONAL:
427          thisRcut = tradRcut;
428          break;
429        case MIX:
430          thisRcut = 0.5 * (gTypeCutoffs[i] + gTypeCutoffs[j]);
431          break;
432        case MAX:
433          thisRcut = max(gTypeCutoffs[i], gTypeCutoffs[j]);
434          break;
435        default:
436          sprintf(painCave.errMsg,
437                  "ForceMatrixDecomposition::createGtypeCutoffMap "
438                  "hit an unknown cutoff policy!\n");
439          painCave.severity = OPENMD_ERROR;
440          painCave.isFatal = 1;
441          simError();
442          break;
443        }
444
445        pair<int,int> key = make_pair(i,j);
446        gTypeCutoffMap[key].first = thisRcut;
447        if (thisRcut > largestRcut_) largestRcut_ = thisRcut;
448        gTypeCutoffMap[key].second = thisRcut*thisRcut;
449        gTypeCutoffMap[key].third = pow(thisRcut + skinThickness_, 2);
450        // sanity check
451        
452        if (userChoseCutoff_) {
453          if (abs(gTypeCutoffMap[key].first - userCutoff_) > 0.0001) {
454            sprintf(painCave.errMsg,
455                    "ForceMatrixDecomposition::createGtypeCutoffMap "
456                    "user-specified rCut (%lf) does not match computed group Cutoff\n", userCutoff_);
457            painCave.severity = OPENMD_ERROR;
458            painCave.isFatal = 1;
459            simError();            
460          }
461        }
462      }
463    }
464  }
465
466  groupCutoffs ForceMatrixDecomposition::getGroupCutoffs(int cg1, int cg2) {
467    int i, j;  
468 #ifdef IS_MPI
469    i = groupRowToGtype[cg1];
470    j = groupColToGtype[cg2];
471 #else
472    i = groupToGtype[cg1];
473    j = groupToGtype[cg2];
474 #endif    
475    return gTypeCutoffMap[make_pair(i,j)];
476  }
477
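Both createGtypeCutoffMap() and getGroupCutoffs() above are removed in revision 2064; the neighbor-list build below switches to a single squared list radius (rListSq_). For reference, the pair cutoff produced by the old policy switch can be summarized by this sketch (pairCutoff is a hypothetical helper, not part of the class):

    RealType pairCutoff(RealType ri, RealType rj, RealType tradRcut, int policy) {
      switch (policy) {
      case TRADITIONAL: return tradRcut;          // one global cutoff for every pair
      case MIX:         return 0.5 * (ri + rj);   // arithmetic mean of the two group cutoffs
      case MAX:         return max(ri, rj);       // larger of the two group cutoffs
      default:          return tradRcut;
      }
    }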
308    int ForceMatrixDecomposition::getTopologicalDistance(int atom1, int atom2) {
309      for (unsigned int j = 0; j < toposForAtom[atom1].size(); j++) {
310        if (toposForAtom[atom1][j] == atom2)
# Line 559 | Line 389 | namespace OpenMD {
389             atomColData.electricField.end(), V3Zero);
390      }
391  
392 +    if (storageLayout_ & DataStorage::dslSitePotential) {    
393 +      fill(atomRowData.sitePotential.begin(),
394 +           atomRowData.sitePotential.end(), 0.0);
395 +      fill(atomColData.sitePotential.begin(),
396 +           atomColData.sitePotential.end(), 0.0);
397 +    }
398 +
399   #endif
400      // even in parallel, we need to zero out the local arrays:
401  
# Line 591 | Line 428 | namespace OpenMD {
428        fill(snap_->atomData.electricField.begin(),
429             snap_->atomData.electricField.end(), V3Zero);
430      }
431 +    if (storageLayout_ & DataStorage::dslSitePotential) {      
432 +      fill(snap_->atomData.sitePotential.begin(),
433 +           snap_->atomData.sitePotential.end(), 0.0);
434 +    }
435    }
436  
437  
438    void ForceMatrixDecomposition::distributeData()  {
439 +  
440 + #ifdef IS_MPI
441 +
442      snap_ = sman_->getCurrentSnapshot();
443      storageLayout_ = sman_->getStorageLayout();
600 #ifdef IS_MPI
444      
445 +    bool needsCG = true;
446 +    if(info_->getNCutoffGroups() != info_->getNAtoms())
447 +      needsCG = false;
448 +
449      // gather up the atomic positions
450      AtomPlanVectorRow->gather(snap_->atomData.position,
451                                atomRowData.position);
# Line 607 | Line 454 | namespace OpenMD {
454      
455      // gather up the cutoff group positions
456  
457 <    cgPlanVectorRow->gather(snap_->cgData.position,
458 <                            cgRowData.position);
459 <
460 <    cgPlanVectorColumn->gather(snap_->cgData.position,
461 <                               cgColData.position);
462 <
457 >    if (needsCG) {
458 >      cgPlanVectorRow->gather(snap_->cgData.position,
459 >                              cgRowData.position);
460 >      
461 >      cgPlanVectorColumn->gather(snap_->cgData.position,
462 >                                 cgColData.position);
463 >    }
464  
465  
466      if (needVelocities_) {
467        // gather up the atomic velocities
468        AtomPlanVectorColumn->gather(snap_->atomData.velocity,
469                                     atomColData.velocity);
470 <      
471 <      cgPlanVectorColumn->gather(snap_->cgData.velocity,
472 <                                 cgColData.velocity);
470 >
471 >      if (needsCG) {        
472 >        cgPlanVectorColumn->gather(snap_->cgData.velocity,
473 >                                   cgColData.velocity);
474 >      }
475      }
476  
477      
# Line 663 | Line 513 | namespace OpenMD {
513     * data structures.
514     */
515    void ForceMatrixDecomposition::collectIntermediateData() {
516 + #ifdef IS_MPI
517 +
518      snap_ = sman_->getCurrentSnapshot();
519      storageLayout_ = sman_->getStorageLayout();
520 < #ifdef IS_MPI
669 <    
520 >
521      if (storageLayout_ & DataStorage::dslDensity) {
522        
523        AtomPlanRealRow->scatter(atomRowData.density,
# Line 701 | Line 552 | namespace OpenMD {
552     * row and column-indexed data structures
553     */
554    void ForceMatrixDecomposition::distributeIntermediateData() {
555 + #ifdef IS_MPI
556      snap_ = sman_->getCurrentSnapshot();
557      storageLayout_ = sman_->getStorageLayout();
558 < #ifdef IS_MPI
558 >
559      if (storageLayout_ & DataStorage::dslFunctional) {
560        AtomPlanRealRow->gather(snap_->atomData.functional,
561                                atomRowData.functional);
# Line 722 | Line 574 | namespace OpenMD {
574    
575    
576    void ForceMatrixDecomposition::collectData() {
577 + #ifdef IS_MPI
578      snap_ = sman_->getCurrentSnapshot();
579      storageLayout_ = sman_->getStorageLayout();
580 < #ifdef IS_MPI    
580 >
581      int n = snap_->atomData.force.size();
582      vector<Vector3d> frc_tmp(n, V3Zero);
583      
# Line 805 | Line 658 | namespace OpenMD {
658          snap_->atomData.electricField[i] += efield_tmp[i];
659      }
660  
661 +    if (storageLayout_ & DataStorage::dslSitePotential) {
662  
663 +      int nsp = snap_->atomData.sitePotential.size();
664 +      vector<RealType> sp_tmp(nsp, 0.0);
665 +
666 +      AtomPlanRealRow->scatter(atomRowData.sitePotential, sp_tmp);
667 +      for (int i = 0; i < nsp; i++) {
668 +        snap_->atomData.sitePotential[i] += sp_tmp[i];
669 +        sp_tmp[i] = 0.0;
670 +      }
671 +      
672 +      AtomPlanRealColumn->scatter(atomColData.sitePotential, sp_tmp);
673 +      for (int i = 0; i < nsp; i++)
674 +        snap_->atomData.sitePotential[i] += sp_tmp[i];
675 +    }
676 +
677      nLocal_ = snap_->getNumberOfAtoms();
678  
679      vector<potVec> pot_temp(nLocal_,
# Line 889 | Line 757 | namespace OpenMD {
757      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
758        RealType ploc1 = pairwisePot[ii];
759        RealType ploc2 = 0.0;
760 <      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
760 >      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
761        pairwisePot[ii] = ploc2;
762      }
763  
764      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
765        RealType ploc1 = excludedPot[ii];
766        RealType ploc2 = 0.0;
767 <      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
767 >      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
768        excludedPot[ii] = ploc2;
769      }
770  
771      // Here be dragons.
772 <    MPI::Intracomm col = colComm.getComm();
772 >    MPI_Comm col = colComm.getComm();
773  
774 <    col.Allreduce(MPI::IN_PLACE,
774 >    MPI_Allreduce(MPI_IN_PLACE,
775                    &snap_->frameData.conductiveHeatFlux[0], 3,
776 <                  MPI::REALTYPE, MPI::SUM);
776 >                  MPI_REALTYPE, MPI_SUM, col);
777  
778  
779   #endif
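The conductiveHeatFlux reduction above uses MPI_IN_PLACE: each rank in the column communicator passes its partial sum as the receive buffer and the summed result overwrites it directly, with no separate send copy. A minimal sketch (assuming col is the communicator returned by colComm.getComm()):

    RealType heatFlux[3] = {0.0, 0.0, 0.0};   // per-rank partial sums (placeholder values)
    MPI_Allreduce(MPI_IN_PLACE, heatFlux, 3, MPI_REALTYPE, MPI_SUM, col);
    // heatFlux now holds the column-wide sums on every rank of col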
# Line 917 | Line 785 | namespace OpenMD {
785     * functional) loops onto local data structures.
786     */
787    void ForceMatrixDecomposition::collectSelfData() {
788 +
789 + #ifdef IS_MPI
790      snap_ = sman_->getCurrentSnapshot();
791      storageLayout_ = sman_->getStorageLayout();
792  
923 #ifdef IS_MPI
793      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
794        RealType ploc1 = embeddingPot[ii];
795        RealType ploc2 = 0.0;
796 <      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
796 >      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
797        embeddingPot[ii] = ploc2;
798      }    
799      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
800        RealType ploc1 = excludedSelfPot[ii];
801        RealType ploc2 = 0.0;
802 <      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
802 >      MPI_Allreduce(&ploc1, &ploc2, 1, MPI_REALTYPE, MPI_SUM, MPI_COMM_WORLD);
803        excludedSelfPot[ii] = ploc2;
804      }    
805   #endif
806      
807    }
808  
940
941
809    int& ForceMatrixDecomposition::getNAtomsInRow() {  
810   #ifdef IS_MPI
811      return nAtomsInRow_;
# Line 966 | Line 833 | namespace OpenMD {
833   #endif
834    }
835    
836 <  Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1, int cg2){
836 >  Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1,
837 >                                                         int cg2){
838 >
839      Vector3d d;
971    
840   #ifdef IS_MPI
841      d = cgColData.position[cg2] - cgRowData.position[cg1];
842   #else
# Line 998 | Line 866 | namespace OpenMD {
866    }
867  
868  
869 <  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){
870 <
869 >  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1,
870 >                                                             int cg1) {
871      Vector3d d;
872      
873   #ifdef IS_MPI
# Line 1013 | Line 881 | namespace OpenMD {
881      return d;    
882    }
883    
884 <  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2, int cg2){
884 >  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2,
885 >                                                                int cg2) {
886      Vector3d d;
887      
888   #ifdef IS_MPI
# Line 1044 | Line 913 | namespace OpenMD {
913  
914    }
915      
916 <  Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1, int atom2){
916 >  Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1,
917 >                                                          int atom2){
918      Vector3d d;
919      
920   #ifdef IS_MPI
# Line 1066 | Line 936 | namespace OpenMD {
936     * We need to exclude some overcounted interactions that result from
937     * the parallel decomposition.
938     */
939 <  bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2, int cg1, int cg2) {
939 >  bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2,
940 >                                              int cg1, int cg2) {
941      int unique_id_1, unique_id_2;
942          
943   #ifdef IS_MPI
# Line 1143 | Line 1014 | namespace OpenMD {
1014  
1015      // filling interaction blocks with pointers
1016    void ForceMatrixDecomposition::fillInteractionData(InteractionData &idat,
1017 <                                                     int atom1, int atom2) {
1017 >                                                     int atom1, int atom2,
1018 >                                                     bool newAtom1) {
1019  
1020      idat.excluded = excludeAtomPair(atom1, atom2);
1021 <  
1021 >
1022 >    if (newAtom1) {
1023 >      
1024   #ifdef IS_MPI
1025 <    idat.atypes = make_pair( atypesRow[atom1], atypesCol[atom2]);
1026 <    //idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]),
1027 <    //                         ff_->getAtomType(identsCol[atom2]) );
1028 <    
1025 >      idat.atid1 = identsRow[atom1];
1026 >      idat.atid2 = identsCol[atom2];
1027 >      
1028 >      if (regionsRow[atom1] >= 0 && regionsCol[atom2] >= 0) {
1029 >        idat.sameRegion = (regionsRow[atom1] == regionsCol[atom2]);
1030 >      } else {
1031 >        idat.sameRegion = false;
1032 >      }
1033 >      
1034 >      if (storageLayout_ & DataStorage::dslAmat) {
1035 >        idat.A1 = &(atomRowData.aMat[atom1]);
1036 >        idat.A2 = &(atomColData.aMat[atom2]);
1037 >      }
1038 >      
1039 >      if (storageLayout_ & DataStorage::dslTorque) {
1040 >        idat.t1 = &(atomRowData.torque[atom1]);
1041 >        idat.t2 = &(atomColData.torque[atom2]);
1042 >      }
1043 >      
1044 >      if (storageLayout_ & DataStorage::dslDipole) {
1045 >        idat.dipole1 = &(atomRowData.dipole[atom1]);
1046 >        idat.dipole2 = &(atomColData.dipole[atom2]);
1047 >      }
1048 >      
1049 >      if (storageLayout_ & DataStorage::dslQuadrupole) {
1050 >        idat.quadrupole1 = &(atomRowData.quadrupole[atom1]);
1051 >        idat.quadrupole2 = &(atomColData.quadrupole[atom2]);
1052 >      }
1053 >      
1054 >      if (storageLayout_ & DataStorage::dslDensity) {
1055 >        idat.rho1 = &(atomRowData.density[atom1]);
1056 >        idat.rho2 = &(atomColData.density[atom2]);
1057 >      }
1058 >      
1059 >      if (storageLayout_ & DataStorage::dslFunctional) {
1060 >        idat.frho1 = &(atomRowData.functional[atom1]);
1061 >        idat.frho2 = &(atomColData.functional[atom2]);
1062 >      }
1063 >      
1064 >      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1065 >        idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
1066 >        idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
1067 >      }
1068 >      
1069 >      if (storageLayout_ & DataStorage::dslParticlePot) {
1070 >        idat.particlePot1 = &(atomRowData.particlePot[atom1]);
1071 >        idat.particlePot2 = &(atomColData.particlePot[atom2]);
1072 >      }
1073 >      
1074 >      if (storageLayout_ & DataStorage::dslSkippedCharge) {              
1075 >        idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
1076 >        idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
1077 >      }
1078 >      
1079 >      if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1080 >        idat.flucQ1 = &(atomRowData.flucQPos[atom1]);
1081 >        idat.flucQ2 = &(atomColData.flucQPos[atom2]);
1082 >      }
1083 >      
1084 > #else
1085 >      
1086 >      idat.atid1 = idents[atom1];
1087 >      idat.atid2 = idents[atom2];
1088 >      
1089 >      if (regions[atom1] >= 0 && regions[atom2] >= 0) {
1090 >        idat.sameRegion = (regions[atom1] == regions[atom2]);
1091 >      } else {
1092 >        idat.sameRegion = false;
1093 >      }
1094 >      
1095 >      if (storageLayout_ & DataStorage::dslAmat) {
1096 >        idat.A1 = &(snap_->atomData.aMat[atom1]);
1097 >        idat.A2 = &(snap_->atomData.aMat[atom2]);
1098 >      }
1099 >      
1100 >      if (storageLayout_ & DataStorage::dslTorque) {
1101 >        idat.t1 = &(snap_->atomData.torque[atom1]);
1102 >        idat.t2 = &(snap_->atomData.torque[atom2]);
1103 >      }
1104 >      
1105 >      if (storageLayout_ & DataStorage::dslDipole) {
1106 >        idat.dipole1 = &(snap_->atomData.dipole[atom1]);
1107 >        idat.dipole2 = &(snap_->atomData.dipole[atom2]);
1108 >      }
1109 >      
1110 >      if (storageLayout_ & DataStorage::dslQuadrupole) {
1111 >        idat.quadrupole1 = &(snap_->atomData.quadrupole[atom1]);
1112 >        idat.quadrupole2 = &(snap_->atomData.quadrupole[atom2]);
1113 >      }
1114 >      
1115 >      if (storageLayout_ & DataStorage::dslDensity) {    
1116 >        idat.rho1 = &(snap_->atomData.density[atom1]);
1117 >        idat.rho2 = &(snap_->atomData.density[atom2]);
1118 >      }
1119 >      
1120 >      if (storageLayout_ & DataStorage::dslFunctional) {
1121 >        idat.frho1 = &(snap_->atomData.functional[atom1]);
1122 >        idat.frho2 = &(snap_->atomData.functional[atom2]);
1123 >      }
1124 >      
1125 >      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1126 >        idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
1127 >        idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
1128 >      }
1129 >      
1130 >      if (storageLayout_ & DataStorage::dslParticlePot) {
1131 >        idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
1132 >        idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
1133 >      }
1134 >      
1135 >      if (storageLayout_ & DataStorage::dslSkippedCharge) {
1136 >        idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
1137 >        idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
1138 >      }
1139 >      
1140 >      if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1141 >        idat.flucQ1 = &(snap_->atomData.flucQPos[atom1]);
1142 >        idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]);
1143 >      }
1144 > #endif
1145 >      
1146 >    } else {
1147 >      // atom1 is not new, so don't bother updating properties of that atom:
1148 > #ifdef IS_MPI
1149 >    idat.atid2 = identsCol[atom2];
1150 >
1151 >    if (regionsRow[atom1] >= 0 && regionsCol[atom2] >= 0) {
1152 >      idat.sameRegion = (regionsRow[atom1] == regionsCol[atom2]);
1153 >    } else {
1154 >      idat.sameRegion = false;
1155 >    }
1156 >
1157      if (storageLayout_ & DataStorage::dslAmat) {
1156      idat.A1 = &(atomRowData.aMat[atom1]);
1158        idat.A2 = &(atomColData.aMat[atom2]);
1159      }
1160      
1161      if (storageLayout_ & DataStorage::dslTorque) {
1161      idat.t1 = &(atomRowData.torque[atom1]);
1162        idat.t2 = &(atomColData.torque[atom2]);
1163      }
1164  
1165      if (storageLayout_ & DataStorage::dslDipole) {
1166      idat.dipole1 = &(atomRowData.dipole[atom1]);
1166        idat.dipole2 = &(atomColData.dipole[atom2]);
1167      }
1168  
1169      if (storageLayout_ & DataStorage::dslQuadrupole) {
1171      idat.quadrupole1 = &(atomRowData.quadrupole[atom1]);
1170        idat.quadrupole2 = &(atomColData.quadrupole[atom2]);
1171      }
1172  
1173      if (storageLayout_ & DataStorage::dslDensity) {
1176      idat.rho1 = &(atomRowData.density[atom1]);
1174        idat.rho2 = &(atomColData.density[atom2]);
1175      }
1176  
1177      if (storageLayout_ & DataStorage::dslFunctional) {
1181      idat.frho1 = &(atomRowData.functional[atom1]);
1178        idat.frho2 = &(atomColData.functional[atom2]);
1179      }
1180  
1181      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1186      idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
1182        idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
1183      }
1184  
1185      if (storageLayout_ & DataStorage::dslParticlePot) {
1191      idat.particlePot1 = &(atomRowData.particlePot[atom1]);
1186        idat.particlePot2 = &(atomColData.particlePot[atom2]);
1187      }
1188  
1189      if (storageLayout_ & DataStorage::dslSkippedCharge) {              
1196      idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
1190        idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
1191      }
1192  
1193 <    if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1201 <      idat.flucQ1 = &(atomRowData.flucQPos[atom1]);
1193 >    if (storageLayout_ & DataStorage::dslFlucQPosition) {
1194        idat.flucQ2 = &(atomColData.flucQPos[atom2]);
1195      }
1196  
1197 < #else
1198 <    
1207 <    idat.atypes = make_pair( atypesLocal[atom1], atypesLocal[atom2]);
1197 > #else  
1198 >    idat.atid2 = idents[atom2];
1199  
1200 +    if (regions[atom1] >= 0 && regions[atom2] >= 0) {
1201 +      idat.sameRegion = (regions[atom1] == regions[atom2]);
1202 +    } else {
1203 +      idat.sameRegion = false;
1204 +    }
1205 +
1206      if (storageLayout_ & DataStorage::dslAmat) {
1210      idat.A1 = &(snap_->atomData.aMat[atom1]);
1207        idat.A2 = &(snap_->atomData.aMat[atom2]);
1208      }
1209  
1210      if (storageLayout_ & DataStorage::dslTorque) {
1215      idat.t1 = &(snap_->atomData.torque[atom1]);
1211        idat.t2 = &(snap_->atomData.torque[atom2]);
1212      }
1213  
1214      if (storageLayout_ & DataStorage::dslDipole) {
1220      idat.dipole1 = &(snap_->atomData.dipole[atom1]);
1215        idat.dipole2 = &(snap_->atomData.dipole[atom2]);
1216      }
1217  
1218      if (storageLayout_ & DataStorage::dslQuadrupole) {
1225      idat.quadrupole1 = &(snap_->atomData.quadrupole[atom1]);
1219        idat.quadrupole2 = &(snap_->atomData.quadrupole[atom2]);
1220      }
1221  
1222      if (storageLayout_ & DataStorage::dslDensity) {    
1230      idat.rho1 = &(snap_->atomData.density[atom1]);
1223        idat.rho2 = &(snap_->atomData.density[atom2]);
1224      }
1225  
1226      if (storageLayout_ & DataStorage::dslFunctional) {
1235      idat.frho1 = &(snap_->atomData.functional[atom1]);
1227        idat.frho2 = &(snap_->atomData.functional[atom2]);
1228      }
1229  
1230      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1240      idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
1231        idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
1232      }
1233  
1234      if (storageLayout_ & DataStorage::dslParticlePot) {
1245      idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
1235        idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
1236      }
1237  
1238      if (storageLayout_ & DataStorage::dslSkippedCharge) {
1250      idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
1239        idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
1240      }
1241  
1242      if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1255      idat.flucQ1 = &(snap_->atomData.flucQPos[atom1]);
1243        idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]);
1244      }
1245  
1246   #endif
1247 +    }
1248    }
1261
1249    
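The new newAtom1 argument lets fillInteractionData() skip refreshing the atom1-side pointers when the outer-loop atom has not changed; only the atom2-side entries are updated on subsequent inner iterations. The real caller is the pair loop elsewhere in OpenMD, but a hypothetical sketch of the intended calling pattern looks like this (atomListRow and atomListCol are placeholder index vectors):

    InteractionData idat;
    for (unsigned int ia = 0; ia < atomListRow.size(); ia++) {
      int atom1 = atomListRow[ia];
      bool newAtom1 = true;
      for (unsigned int jb = 0; jb < atomListCol.size(); jb++) {
        int atom2 = atomListCol[jb];
        fillInteractionData(idat, atom1, atom2, newAtom1);
        newAtom1 = false;   // atom1 pointers are already cached in idat
        // ... evaluate the pair interaction, then unpackInteractionData(idat, atom1, atom2) ...
      }
    }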
1250 <  void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) {    
1250 >  void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat,
1251 >                                                       int atom1, int atom2) {  
1252   #ifdef IS_MPI
1253      pot_row[atom1] += RealType(0.5) *  *(idat.pot);
1254      pot_col[atom2] += RealType(0.5) *  *(idat.pot);
# Line 1280 | Line 1268 | namespace OpenMD {
1268        atomColData.electricField[atom2] += *(idat.eField2);
1269      }
1270  
1271 +    if (storageLayout_ & DataStorage::dslSitePotential) {              
1272 +      atomRowData.sitePotential[atom1] += *(idat.sPot1);
1273 +      atomColData.sitePotential[atom2] += *(idat.sPot2);
1274 +    }
1275 +
1276   #else
1277      pairwisePot += *(idat.pot);
1278      excludedPot += *(idat.excludedPot);
# Line 1306 | Line 1299 | namespace OpenMD {
1299        snap_->atomData.electricField[atom2] += *(idat.eField2);
1300      }
1301  
1302 +    if (storageLayout_ & DataStorage::dslSitePotential) {              
1303 +      snap_->atomData.sitePotential[atom1] += *(idat.sPot1);
1304 +      snap_->atomData.sitePotential[atom2] += *(idat.sPot2);
1305 +    }
1306 +
1307   #endif
1308      
1309    }
# Line 1313 | Line 1311 | namespace OpenMD {
1311    /*
1312     * buildNeighborList
1313     *
1314 <   * first element of pair is row-indexed CutoffGroup
1315 <   * second element of pair is column-indexed CutoffGroup
1314 >   * Constructs the Verlet neighbor list for a force-matrix
1315 >   * decomposition.  In this case, each processor is responsible for
1316 >   * row-site interactions with column-sites.
1317 >   *
1318 >   * neighborList is returned as a packed array of neighboring
1319 >   * column-ordered CutoffGroups.  The starting position in
1320 >   * neighborList for each row-ordered CutoffGroup is given by the
1321 >   * returned vector point.
1322     */
1323 <  vector<pair<int, int> > ForceMatrixDecomposition::buildNeighborList() {
1324 <      
1325 <    vector<pair<int, int> > neighborList;
1326 <    groupCutoffs cuts;
1323 >  void ForceMatrixDecomposition::buildNeighborList(vector<int>& neighborList,
1324 >                                                   vector<int>& point) {
1325 >    neighborList.clear();
1326 >    point.clear();
1327 >    int len = 0;
1328 >    
1329      bool doAllPairs = false;
1330  
1325    RealType rList_ = (largestRcut_ + skinThickness_);
1331      Snapshot* snap_ = sman_->getCurrentSnapshot();
1332      Mat3x3d box;
1333      Mat3x3d invBox;
# Line 1334 | Line 1339 | namespace OpenMD {
1339   #ifdef IS_MPI
1340      cellListRow_.clear();
1341      cellListCol_.clear();
1342 +    point.resize(nGroupsInRow_+1);
1343   #else
1344      cellList_.clear();
1345 +    point.resize(nGroups_+1);
1346   #endif
1347      
1348      if (!usePeriodicBoundaryConditions_) {
# Line 1346 | Line 1353 | namespace OpenMD {
1353        invBox = snap_->getInvHmat();
1354      }
1355      
1356 <    Vector3d boxX = box.getColumn(0);
1357 <    Vector3d boxY = box.getColumn(1);
1358 <    Vector3d boxZ = box.getColumn(2);
1356 >    Vector3d A = box.getColumn(0);
1357 >    Vector3d B = box.getColumn(1);
1358 >    Vector3d C = box.getColumn(2);
1359 >
1360 >    // Required for triclinic cells
1361 >    Vector3d AxB = cross(A, B);
1362 >    Vector3d BxC = cross(B, C);
1363 >    Vector3d CxA = cross(C, A);
1364 >
1365 >    // unit vectors perpendicular to the faces of the triclinic cell:
1366 >    AxB.normalize();
1367 >    BxC.normalize();
1368 >    CxA.normalize();
1369 >
1370 >    // A set of perpendicular lengths in triclinic cells:
1371 >    RealType Wa = abs(dot(A, BxC));
1372 >    RealType Wb = abs(dot(B, CxA));
1373 >    RealType Wc = abs(dot(C, AxB));
1374      
1375 <    nCells_.x() = (int) ( boxX.length() )/ rList_;
1376 <    nCells_.y() = (int) ( boxY.length() )/ rList_;
1377 <    nCells_.z() = (int) ( boxZ.length() )/ rList_;
1375 >    nCells_.x() = int( Wa / rList_ );
1376 >    nCells_.y() = int( Wb / rList_ );
1377 >    nCells_.z() = int( Wc / rList_ );
1378      
1379      // handle small boxes where the cell offsets can end up repeating cells
1358    
1380      if (nCells_.x() < 3) doAllPairs = true;
1381      if (nCells_.y() < 3) doAllPairs = true;
1382      if (nCells_.z() < 3) doAllPairs = true;
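The cell counts above are based on perpendicular face-to-face widths rather than the box column lengths used in revision 1893: since B x C is normalized before the dot product, Wa = |A . (B x C)| / |B x C|, which reduces to |A| for an orthorhombic box (and cyclically for Wb and Wc). Dividing these widths by rList_ guarantees that every cell is at least one list radius thick even for strongly skewed (triclinic) cells, so the 3x3x3 offset stencil still covers all neighbors within rList_.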
# Line 1370 | Line 1391 | namespace OpenMD {
1391   #endif
1392      
1393      if (!doAllPairs) {
1394 +      
1395   #ifdef IS_MPI
1396        
1397        for (int i = 0; i < nGroupsInRow_; i++) {
# Line 1428 | Line 1450 | namespace OpenMD {
1450          // add this cutoff group to the list of groups in this cell;
1451          cellListCol_[cellIndex].push_back(i);
1452        }
1453 <      
1453 >            
1454   #else
1455        for (int i = 0; i < nGroups_; i++) {
1456          rs = snap_->cgData.position[i];
# Line 1448 | Line 1470 | namespace OpenMD {
1470          }
1471          
1472          // find xyz-indices of cell that cutoffGroup is in.
1473 <        whichCell.x() = nCells_.x() * scaled.x();
1474 <        whichCell.y() = nCells_.y() * scaled.y();
1475 <        whichCell.z() = nCells_.z() * scaled.z();
1473 >        whichCell.x() = int(nCells_.x() * scaled.x());
1474 >        whichCell.y() = int(nCells_.y() * scaled.y());
1475 >        whichCell.z() = int(nCells_.z() * scaled.z());
1476          
1477          // find single index of this cell:
1478          cellIndex = Vlinear(whichCell, nCells_);
# Line 1461 | Line 1483 | namespace OpenMD {
1483  
1484   #endif
1485  
1486 <      for (int m1z = 0; m1z < nCells_.z(); m1z++) {
1487 <        for (int m1y = 0; m1y < nCells_.y(); m1y++) {
1488 <          for (int m1x = 0; m1x < nCells_.x(); m1x++) {
1489 <            Vector3i m1v(m1x, m1y, m1z);
1468 <            int m1 = Vlinear(m1v, nCells_);
1469 <            
1470 <            for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1471 <                 os != cellOffsets_.end(); ++os) {
1472 <              
1473 <              Vector3i m2v = m1v + (*os);
1474 <            
1486 > #ifdef IS_MPI
1487 >      for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1488 >        rs = cgRowData.position[j1];
1489 > #else
1490  
1491 <              if (m2v.x() >= nCells_.x()) {
1492 <                m2v.x() = 0;          
1493 <              } else if (m2v.x() < 0) {
1494 <                m2v.x() = nCells_.x() - 1;
1495 <              }
1496 <              
1497 <              if (m2v.y() >= nCells_.y()) {
1498 <                m2v.y() = 0;          
1499 <              } else if (m2v.y() < 0) {
1500 <                m2v.y() = nCells_.y() - 1;
1501 <              }
1502 <              
1503 <              if (m2v.z() >= nCells_.z()) {
1504 <                m2v.z() = 0;          
1505 <              } else if (m2v.z() < 0) {
1506 <                m2v.z() = nCells_.z() - 1;
1507 <              }
1491 >      for (int j1 = 0; j1 < nGroups_; j1++) {
1492 >        rs = snap_->cgData.position[j1];
1493 > #endif
1494 >        point[j1] = len;
1495 >        
1496 >        // scaled positions relative to the box vectors
1497 >        scaled = invBox * rs;
1498 >        
1499 >        // wrap the vector back into the unit box by subtracting integer box
1500 >        // numbers
1501 >        for (int j = 0; j < 3; j++) {
1502 >          scaled[j] -= roundMe(scaled[j]);
1503 >          scaled[j] += 0.5;
1504 >          // Handle the special case when an object is exactly on the
1505 >          // boundary (a scaled coordinate of 1.0 is the same as
1506 >          // a scaled coordinate of 0.0)
1507 >          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1508 >        }
1509 >        
1510 >        // find xyz-indices of cell that cutoffGroup is in.
1511 >        whichCell.x() = nCells_.x() * scaled.x();
1512 >        whichCell.y() = nCells_.y() * scaled.y();
1513 >        whichCell.z() = nCells_.z() * scaled.z();
1514 >        
1515 >        // find single index of this cell:
1516 >        int m1 = Vlinear(whichCell, nCells_);
1517  
1518 <              int m2 = Vlinear (m2v, nCells_);
1518 >        for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1519 >             os != cellOffsets_.end(); ++os) {
1520                
1521 +          Vector3i m2v = whichCell + (*os);
1522 +
1523 +          if (m2v.x() >= nCells_.x()) {
1524 +            m2v.x() = 0;          
1525 +          } else if (m2v.x() < 0) {
1526 +            m2v.x() = nCells_.x() - 1;
1527 +          }
1528 +          
1529 +          if (m2v.y() >= nCells_.y()) {
1530 +            m2v.y() = 0;          
1531 +          } else if (m2v.y() < 0) {
1532 +            m2v.y() = nCells_.y() - 1;
1533 +          }
1534 +          
1535 +          if (m2v.z() >= nCells_.z()) {
1536 +            m2v.z() = 0;          
1537 +          } else if (m2v.z() < 0) {
1538 +            m2v.z() = nCells_.z() - 1;
1539 +          }
1540 +          int m2 = Vlinear (m2v, nCells_);                                      
1541   #ifdef IS_MPI
1542 <              for (vector<int>::iterator j1 = cellListRow_[m1].begin();
1543 <                   j1 != cellListRow_[m1].end(); ++j1) {
1544 <                for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1545 <                     j2 != cellListCol_[m2].end(); ++j2) {
1546 <                  
1547 <                  // In parallel, we need to visit *all* pairs of row
1548 <                  // & column indices and will divide labor in the
1549 <                  // force evaluation later.
1550 <                  dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)];
1551 <                  if (usePeriodicBoundaryConditions_) {
1552 <                    snap_->wrapVector(dr);
1553 <                  }
1554 <                  cuts = getGroupCutoffs( (*j1), (*j2) );
1555 <                  if (dr.lengthSquare() < cuts.third) {
1556 <                    neighborList.push_back(make_pair((*j1), (*j2)));
1512 <                  }                  
1513 <                }
1514 <              }
1542 >          for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1543 >               j2 != cellListCol_[m2].end(); ++j2) {
1544 >            
1545 >            // In parallel, we need to visit *all* pairs of row
1546 >            // & column indices and will divide labor in the
1547 >            // force evaluation later.
1548 >            dr = cgColData.position[(*j2)] - rs;
1549 >            if (usePeriodicBoundaryConditions_) {
1550 >              snap_->wrapVector(dr);
1551 >            }
1552 >            if (dr.lengthSquare() < rListSq_) {
1553 >              neighborList.push_back( (*j2) );
1554 >              ++len;
1555 >            }                
1556 >          }        
1557   #else
1558 <              for (vector<int>::iterator j1 = cellList_[m1].begin();
1559 <                   j1 != cellList_[m1].end(); ++j1) {
1560 <                for (vector<int>::iterator j2 = cellList_[m2].begin();
1561 <                     j2 != cellList_[m2].end(); ++j2) {
1562 <    
1563 <                  // Always do this if we're in different cells or if
1564 <                  // we're in the same cell and the global index of
1565 <                  // the j2 cutoff group is greater than or equal to
1566 <                  // the j1 cutoff group.  Note that Rappaport's code
1567 <                  // has a "less than" conditional here, but that
1568 <                  // deals with atom-by-atom computation.  OpenMD
1569 <                  // allows atoms within a single cutoff group to
1570 <                  // interact with each other.
1571 <
1572 <                  if (m2 != m1 || (*j2) >= (*j1) ) {
1573 <
1574 <                    dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
1533 <                    if (usePeriodicBoundaryConditions_) {
1534 <                      snap_->wrapVector(dr);
1535 <                    }
1536 <                    cuts = getGroupCutoffs( (*j1), (*j2) );
1537 <                    if (dr.lengthSquare() < cuts.third) {
1538 <                      neighborList.push_back(make_pair((*j1), (*j2)));
1539 <                    }
1540 <                  }
1541 <                }
1558 >          for (vector<int>::iterator j2 = cellList_[m2].begin();
1559 >               j2 != cellList_[m2].end(); ++j2) {
1560 >          
1561 >            // Always do this if we're in different cells or if
1562 >            // we're in the same cell and the global index of
1563 >            // the j2 cutoff group is greater than or equal to
1564 >            // the j1 cutoff group.  Note that Rappaport's code
1565 >            // has a "less than" conditional here, but that
1566 >            // deals with atom-by-atom computation.  OpenMD
1567 >            // allows atoms within a single cutoff group to
1568 >            // interact with each other.
1569 >            
1570 >            if ( (*j2) >= j1 ) {
1571 >              
1572 >              dr = snap_->cgData.position[(*j2)] - rs;
1573 >              if (usePeriodicBoundaryConditions_) {
1574 >                snap_->wrapVector(dr);
1575                }
1576 < #endif
1576 >              if ( dr.lengthSquare() < rListSq_) {
1577 >                neighborList.push_back( (*j2) );
1578 >                ++len;
1579 >              }
1580              }
1581 <          }
1581 >          }                
1582 > #endif
1583          }
1584 <      }
1584 >      }      
1585      } else {
1586        // branch to do all cutoff group pairs
1587   #ifdef IS_MPI
1588        for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1589 +        point[j1] = len;
1590 +        rs = cgRowData.position[j1];
1591          for (int j2 = 0; j2 < nGroupsInCol_; j2++) {    
1592 <          dr = cgColData.position[j2] - cgRowData.position[j1];
1592 >          dr = cgColData.position[j2] - rs;
1593            if (usePeriodicBoundaryConditions_) {
1594              snap_->wrapVector(dr);
1595            }
1596 <          cuts = getGroupCutoffs( j1, j2 );
1597 <          if (dr.lengthSquare() < cuts.third) {
1598 <            neighborList.push_back(make_pair(j1, j2));
1596 >          if (dr.lengthSquare() < rListSq_) {
1597 >            neighborList.push_back( j2 );
1598 >            ++len;
1599            }
1600          }
1601        }      
1602   #else
1603        // include all groups here.
1604        for (int j1 = 0; j1 < nGroups_; j1++) {
1605 +        point[j1] = len;
1606 +        rs = snap_->cgData.position[j1];
1607          // include self group interactions j2 == j1
1608          for (int j2 = j1; j2 < nGroups_; j2++) {
1609 <          dr = snap_->cgData.position[j2] - snap_->cgData.position[j1];
1609 >          dr = snap_->cgData.position[j2] - rs;
1610            if (usePeriodicBoundaryConditions_) {
1611              snap_->wrapVector(dr);
1612            }
1613 <          cuts = getGroupCutoffs( j1, j2 );
1614 <          if (dr.lengthSquare() < cuts.third) {
1615 <            neighborList.push_back(make_pair(j1, j2));
1613 >          if (dr.lengthSquare() < rListSq_) {
1614 >            neighborList.push_back( j2 );
1615 >            ++len;
1616            }
1617          }    
1618        }
1619   #endif
1620      }
1621 <      
1621 >
1622 > #ifdef IS_MPI
1623 >    point[nGroupsInRow_] = len;
1624 > #else
1625 >    point[nGroups_] = len;
1626 > #endif
1627 >  
1628      // save the local cutoff group positions for the check that is
1629      // done on each loop:
1630      saved_CG_positions_.clear();
1631 +    saved_CG_positions_.reserve(nGroups_);
1632      for (int i = 0; i < nGroups_; i++)
1633        saved_CG_positions_.push_back(snap_->cgData.position[i]);
1586    
1587    return neighborList;
1634    }
1635   } //end namespace OpenMD
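Revision 2064 changes buildNeighborList() from returning a vector of (row, column) index pairs to filling a packed, CSR-style pair of arrays: the neighbors of row-ordered group j1 occupy positions point[j1] through point[j1+1]-1 of neighborList. A minimal traversal sketch (decomp is a hypothetical ForceMatrixDecomposition instance; in serial builds the indices are local group indices on both sides):

    vector<int> nbList, point;
    decomp.buildNeighborList(nbList, point);

    for (int j1 = 0; j1 < int(point.size()) - 1; j1++) {
      for (int k = point[j1]; k < point[j1 + 1]; k++) {
        int j2 = nbList[k];   // column-ordered (or local) neighbor of group j1
        // ... evaluate the j1 - j2 group pair ...
      }
    }

The packed layout avoids allocating one pair object per neighbor and lets the force loop visit each row group's neighbors contiguously.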

Diff Legend

- Removed lines
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)