
Comparing branches/development/src/parallel/ForceMatrixDecomposition.cpp (file contents):
Revision 1601 by gezelter, Thu Aug 4 20:04:35 2011 UTC vs.
Revision 1761 by gezelter, Fri Jun 22 20:01:37 2012 UTC

# Line 36 | Line 36
36   * [1]  Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).            
37   * [2]  Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).          
38   * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).          
39 < * [4]  Vardeman & Gezelter, in progress (2009).                        
39 > * [4]  Kuang & Gezelter,  J. Chem. Phys. 133, 164101 (2010).
40 > * [5]  Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
41   */
42   #include "parallel/ForceMatrixDecomposition.hpp"
43   #include "math/SquareMatrix3.hpp"
# Line 53 | Line 54 | namespace OpenMD {
54      // surrounding cells (not just the 14 upper triangular blocks that
55      // are used when the processor can see all pairs)
56   #ifdef IS_MPI
57 <    cellOffsets_.push_back( Vector3i(-1, 0, 0) );
57 <    cellOffsets_.push_back( Vector3i(-1,-1, 0) );
58 <    cellOffsets_.push_back( Vector3i( 0,-1, 0) );
59 <    cellOffsets_.push_back( Vector3i( 1,-1, 0) );
60 <    cellOffsets_.push_back( Vector3i( 0, 0,-1) );
61 <    cellOffsets_.push_back( Vector3i(-1, 0, 1) );
57 >    cellOffsets_.clear();
58      cellOffsets_.push_back( Vector3i(-1,-1,-1) );
59      cellOffsets_.push_back( Vector3i( 0,-1,-1) );
60 <    cellOffsets_.push_back( Vector3i( 1,-1,-1) );
60 >    cellOffsets_.push_back( Vector3i( 1,-1,-1) );                          
61 >    cellOffsets_.push_back( Vector3i(-1, 0,-1) );
62 >    cellOffsets_.push_back( Vector3i( 0, 0,-1) );
63      cellOffsets_.push_back( Vector3i( 1, 0,-1) );
66    cellOffsets_.push_back( Vector3i( 1, 1,-1) );
67    cellOffsets_.push_back( Vector3i( 0, 1,-1) );
64      cellOffsets_.push_back( Vector3i(-1, 1,-1) );
65 +    cellOffsets_.push_back( Vector3i( 0, 1,-1) );      
66 +    cellOffsets_.push_back( Vector3i( 1, 1,-1) );
67 +    cellOffsets_.push_back( Vector3i(-1,-1, 0) );
68 +    cellOffsets_.push_back( Vector3i( 0,-1, 0) );
69 +    cellOffsets_.push_back( Vector3i( 1,-1, 0) );
70 +    cellOffsets_.push_back( Vector3i(-1, 0, 0) );      
71 +    cellOffsets_.push_back( Vector3i( 0, 0, 0) );
72 +    cellOffsets_.push_back( Vector3i( 1, 0, 0) );
73 +    cellOffsets_.push_back( Vector3i(-1, 1, 0) );
74 +    cellOffsets_.push_back( Vector3i( 0, 1, 0) );
75 +    cellOffsets_.push_back( Vector3i( 1, 1, 0) );
76 +    cellOffsets_.push_back( Vector3i(-1,-1, 1) );
77 +    cellOffsets_.push_back( Vector3i( 0,-1, 1) );
78 +    cellOffsets_.push_back( Vector3i( 1,-1, 1) );
79 +    cellOffsets_.push_back( Vector3i(-1, 0, 1) );
80 +    cellOffsets_.push_back( Vector3i( 0, 0, 1) );
81 +    cellOffsets_.push_back( Vector3i( 1, 0, 1) );
82 +    cellOffsets_.push_back( Vector3i(-1, 1, 1) );
83 +    cellOffsets_.push_back( Vector3i( 0, 1, 1) );
84 +    cellOffsets_.push_back( Vector3i( 1, 1, 1) );
85   #endif    
86    }
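
The replacement block enumerates all 27 offsets of {-1, 0, 1}^3, home cell included, because in the MPI decomposition the row and column atom sets differ, so the half-shell trick of visiting only 14 upper-triangular cell blocks (which relies on one processor seeing both members of every pair) no longer applies. The list is exactly the triple Cartesian product and can be reproduced with a triple loop; a standalone sketch, where Vector3i is a stand-in struct rather than OpenMD's class:

    #include <vector>

    struct Vector3i { int x, y, z; };  // stand-in for OpenMD's Vector3i

    std::vector<Vector3i> makeFullCellOffsets() {
      std::vector<Vector3i> offsets;
      for (int k = -1; k <= 1; ++k)        // z varies slowest, matching the list
        for (int j = -1; j <= 1; ++j)      //   then y
          for (int i = -1; i <= 1; ++i)    //   x varies fastest
            offsets.push_back(Vector3i{i, j, k});
      return offsets;                      // 27 offsets, (0, 0, 0) included
    }
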
87  
# Line 79 | Line 95 | namespace OpenMD {
95      storageLayout_ = sman_->getStorageLayout();
96      ff_ = info_->getForceField();
97      nLocal_ = snap_->getNumberOfAtoms();
98 <    
98 >  
99      nGroups_ = info_->getNLocalCutoffGroups();
100      // gather the information for atomtype IDs (atids):
101      idents = info_->getIdentArray();
# Line 93 | Line 109 | namespace OpenMD {
109      PairList* oneTwo = info_->getOneTwoInteractions();
110      PairList* oneThree = info_->getOneThreeInteractions();
111      PairList* oneFour = info_->getOneFourInteractions();
112 <
112 >    
113 >    if (needVelocities_)
114 >      snap_->cgData.setStorageLayout(DataStorage::dslPosition |
115 >                                     DataStorage::dslVelocity);
116 >    else
117 >      snap_->cgData.setStorageLayout(DataStorage::dslPosition);
118 >    
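
setStorageLayout takes a bitwise OR of dsl* flags, and the many "storageLayout_ & DataStorage::dslX" tests later in this diff check individual bits of that mask. A minimal sketch of the pattern, with made-up flag values rather than the real enum in OpenMD's DataStorage.hpp:

    // Illustrative flag values; the real dsl* enum lives in DataStorage.hpp.
    enum StorageBits {
      dslPosition = 1 << 0,
      dslVelocity = 1 << 1,
      dslForce    = 1 << 2
    };

    int  layout      = dslPosition | dslVelocity;    // request both arrays
    bool hasVelocity = (layout & dslVelocity) != 0;  // the "storageLayout_ &" test
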
119   #ifdef IS_MPI
120  
121      MPI::Intracomm row = rowComm.getComm();
# Line 129 | Line 151 | namespace OpenMD {
151      cgRowData.resize(nGroupsInRow_);
152      cgRowData.setStorageLayout(DataStorage::dslPosition);
153      cgColData.resize(nGroupsInCol_);
154 <    cgColData.setStorageLayout(DataStorage::dslPosition);
155 <        
154 >    if (needVelocities_)
155 >      // column velocities are only stored when velocities are required.
156 >      cgColData.setStorageLayout(DataStorage::dslPosition |
157 >                                 DataStorage::dslVelocity);
158 >    else    
159 >      cgColData.setStorageLayout(DataStorage::dslPosition);
160 >      
161      identsRow.resize(nAtomsInRow_);
162      identsCol.resize(nAtomsInCol_);
163      
# Line 148 | Line 175 | namespace OpenMD {
175  
176      pot_row.resize(nAtomsInRow_);
177      pot_col.resize(nAtomsInCol_);
178 +
179 +    expot_row.resize(nAtomsInRow_);
180 +    expot_col.resize(nAtomsInCol_);
181  
182      AtomRowToGlobal.resize(nAtomsInRow_);
183      AtomColToGlobal.resize(nAtomsInCol_);
# Line 218 | Line 248 | namespace OpenMD {
248        }      
249      }
250  
251 < #endif
222 <
223 <    // allocate memory for the parallel objects
224 <    atypesLocal.resize(nLocal_);
225 <
226 <    for (int i = 0; i < nLocal_; i++)
227 <      atypesLocal[i] = ff_->getAtomType(idents[i]);
228 <
229 <    groupList_.clear();
230 <    groupList_.resize(nGroups_);
231 <    for (int i = 0; i < nGroups_; i++) {
232 <      int gid = cgLocalToGlobal[i];
233 <      for (int j = 0; j < nLocal_; j++) {
234 <        int aid = AtomLocalToGlobal[j];
235 <        if (globalGroupMembership[aid] == gid) {
236 <          groupList_[i].push_back(j);
237 <        }
238 <      }      
239 <    }
240 <
251 > #else
252      excludesForAtom.clear();
253      excludesForAtom.resize(nLocal_);
254      toposForAtom.clear();
# Line 270 | Line 281 | namespace OpenMD {
281          }
282        }      
283      }
284 <    
284 > #endif
285 >
286 >    // allocate memory for the parallel objects
287 >    atypesLocal.resize(nLocal_);
288 >
289 >    for (int i = 0; i < nLocal_; i++)
290 >      atypesLocal[i] = ff_->getAtomType(idents[i]);
291 >
292 >    groupList_.clear();
293 >    groupList_.resize(nGroups_);
294 >    for (int i = 0; i < nGroups_; i++) {
295 >      int gid = cgLocalToGlobal[i];
296 >      for (int j = 0; j < nLocal_; j++) {
297 >        int aid = AtomLocalToGlobal[j];
298 >        if (globalGroupMembership[aid] == gid) {
299 >          groupList_[i].push_back(j);
300 >        }
301 >      }      
302 >    }
303 >
304 >
305      createGtypeCutoffMap();
306  
307    }
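
The groupList_ construction that now follows the #endif runs in both serial and parallel builds: for every local cutoff group it scans all local atoms and keeps those whose global ID maps to the group's global ID, an O(nGroups_ x nLocal_) pass. A self-contained sketch with made-up data:

    #include <vector>
    using std::vector;

    int main() {
      vector<int> cgLocalToGlobal   = {10, 11};        // local group -> global group
      vector<int> AtomLocalToGlobal = {100, 101, 102}; // local atom  -> global atom
      vector<int> globalGroupMembership(200, -1);      // global atom -> global group
      globalGroupMembership[100] = 10;
      globalGroupMembership[101] = 10;
      globalGroupMembership[102] = 11;

      vector<vector<int> > groupList(cgLocalToGlobal.size());
      for (int i = 0; i < (int)cgLocalToGlobal.size(); i++)
        for (int j = 0; j < (int)AtomLocalToGlobal.size(); j++)
          if (globalGroupMembership[AtomLocalToGlobal[j]] == cgLocalToGlobal[i])
            groupList[i].push_back(j);
      // groupList is now { {0, 1}, {2} }
      return 0;
    }
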
# Line 433 | Line 464 | namespace OpenMD {
464      }
465    }
466  
436
467    groupCutoffs ForceMatrixDecomposition::getGroupCutoffs(int cg1, int cg2) {
468      int i, j;  
469   #ifdef IS_MPI
# Line 457 | Line 487 | namespace OpenMD {
487    void ForceMatrixDecomposition::zeroWorkArrays() {
488      pairwisePot = 0.0;
489      embeddingPot = 0.0;
490 +    excludedPot = 0.0;
491 +    excludedSelfPot = 0.0;
492  
493   #ifdef IS_MPI
494      if (storageLayout_ & DataStorage::dslForce) {
# Line 475 | Line 507 | namespace OpenMD {
507      fill(pot_col.begin(), pot_col.end(),
508           Vector<RealType, N_INTERACTION_FAMILIES> (0.0));  
509  
510 +    fill(expot_row.begin(), expot_row.end(),
511 +         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
512 +
513 +    fill(expot_col.begin(), expot_col.end(),
514 +         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));  
515 +
516      if (storageLayout_ & DataStorage::dslParticlePot) {    
517        fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(),
518             0.0);
# Line 506 | Line 544 | namespace OpenMD {
544             atomRowData.skippedCharge.end(), 0.0);
545        fill(atomColData.skippedCharge.begin(),
546             atomColData.skippedCharge.end(), 0.0);
547 +    }
548 +
549 +    if (storageLayout_ & DataStorage::dslFlucQForce) {      
550 +      fill(atomRowData.flucQFrc.begin(),
551 +           atomRowData.flucQFrc.end(), 0.0);
552 +      fill(atomColData.flucQFrc.begin(),
553 +           atomColData.flucQFrc.end(), 0.0);
554      }
555  
556 +    if (storageLayout_ & DataStorage::dslElectricField) {    
557 +      fill(atomRowData.electricField.begin(),
558 +           atomRowData.electricField.end(), V3Zero);
559 +      fill(atomColData.electricField.begin(),
560 +           atomColData.electricField.end(), V3Zero);
561 +    }
562 +
563 +    if (storageLayout_ & DataStorage::dslFlucQForce) {    
564 +      fill(atomRowData.flucQFrc.begin(), atomRowData.flucQFrc.end(),
565 +           0.0);
566 +      fill(atomColData.flucQFrc.begin(), atomColData.flucQFrc.end(),
567 +           0.0);
568 +    }
569 +
570   #endif
571      // even in parallel, we need to zero out the local arrays:
572  
# Line 520 | Line 579 | namespace OpenMD {
579        fill(snap_->atomData.density.begin(),
580             snap_->atomData.density.end(), 0.0);
581      }
582 +
583      if (storageLayout_ & DataStorage::dslFunctional) {
584        fill(snap_->atomData.functional.begin(),
585             snap_->atomData.functional.end(), 0.0);
586      }
587 +
588      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {      
589        fill(snap_->atomData.functionalDerivative.begin(),
590             snap_->atomData.functionalDerivative.end(), 0.0);
591      }
592 +
593      if (storageLayout_ & DataStorage::dslSkippedCharge) {      
594        fill(snap_->atomData.skippedCharge.begin(),
595             snap_->atomData.skippedCharge.end(), 0.0);
596      }
597 <    
597 >
598 >    if (storageLayout_ & DataStorage::dslElectricField) {      
599 >      fill(snap_->atomData.electricField.begin(),
600 >           snap_->atomData.electricField.end(), V3Zero);
601 >    }
602    }
603  
604  
# Line 555 | Line 621 | namespace OpenMD {
621      cgPlanVectorColumn->gather(snap_->cgData.position,
622                                 cgColData.position);
623  
624 +
625 +
626 +    if (needVelocities_) {
627 +      // gather up the atomic velocities
628 +      AtomPlanVectorColumn->gather(snap_->atomData.velocity,
629 +                                   atomColData.velocity);
630 +      
631 +      cgPlanVectorColumn->gather(snap_->cgData.velocity,
632 +                                 cgColData.velocity);
633 +    }
634 +
635      
636      // if needed, gather the atomic rotation matrices
637      if (storageLayout_ & DataStorage::dslAmat) {
# Line 572 | Line 649 | namespace OpenMD {
649                                     atomColData.electroFrame);
650      }
651  
652 +    // if needed, gather the atomic fluctuating charge values
653 +    if (storageLayout_ & DataStorage::dslFlucQPosition) {
654 +      AtomPlanRealRow->gather(snap_->atomData.flucQPos,
655 +                              atomRowData.flucQPos);
656 +      AtomPlanRealColumn->gather(snap_->atomData.flucQPos,
657 +                                 atomColData.flucQPos);
658 +    }
659 +
660   #endif      
661    }
662    
# Line 594 | Line 679 | namespace OpenMD {
679        for (int i = 0; i < n; i++)
680          snap_->atomData.density[i] += rho_tmp[i];
681      }
682 +
683 +    if (storageLayout_ & DataStorage::dslElectricField) {
684 +      
685 +      AtomPlanVectorRow->scatter(atomRowData.electricField,
686 +                                 snap_->atomData.electricField);
687 +      
688 +      int n = snap_->atomData.electricField.size();
689 +      vector<Vector3d> field_tmp(n, V3Zero);
690 +      AtomPlanVectorColumn->scatter(atomColData.electricField, field_tmp);
691 +      for (int i = 0; i < n; i++)
692 +        snap_->atomData.electricField[i] += field_tmp[i];
693 +    }
694   #endif
695    }
696  
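
The electric-field collection above shows the combine pattern used throughout these scatters: the row scatter writes straight into the local array, while the column scatter goes through a temporary that is summed in, so each atom receives its row contribution plus its column contribution exactly once. A no-MPI illustration of the same two-step combine:

    #include <vector>
    using std::vector;

    int main() {
      vector<double> local(4);
      vector<double> fromRow = {1, 2, 3, 4};      // stand-in for the row scatter
      vector<double> fromCol = {10, 20, 30, 40};  // stand-in for the column scatter

      local = fromRow;                            // row scatter overwrites
      for (int i = 0; i < (int)local.size(); i++)
        local[i] += fromCol[i];                   // column scatter accumulates
      // local is now {11, 22, 33, 44}: one row + one column share per atom
      return 0;
    }
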
# Line 668 | Line 765 | namespace OpenMD {
765        }
766        
767        AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp);
768 <      for (int i = 0; i < ns; i++)
768 >      for (int i = 0; i < ns; i++)
769          snap_->atomData.skippedCharge[i] += skch_tmp[i];
770 +            
771      }
772      
773 +    if (storageLayout_ & DataStorage::dslFlucQForce) {
774 +
775 +      int nq = snap_->atomData.flucQFrc.size();
776 +      vector<RealType> fqfrc_tmp(nq, 0.0);
777 +
778 +      AtomPlanRealRow->scatter(atomRowData.flucQFrc, fqfrc_tmp);
779 +      for (int i = 0; i < nq; i++) {
780 +        snap_->atomData.flucQFrc[i] += fqfrc_tmp[i];
781 +        fqfrc_tmp[i] = 0.0;
782 +      }
783 +      
784 +      AtomPlanRealColumn->scatter(atomColData.flucQFrc, fqfrc_tmp);
785 +      for (int i = 0; i < nq; i++)
786 +        snap_->atomData.flucQFrc[i] += fqfrc_tmp[i];
787 +            
788 +    }
789 +
790      nLocal_ = snap_->getNumberOfAtoms();
791  
792      vector<potVec> pot_temp(nLocal_,
793                              Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
794 +    vector<potVec> expot_temp(nLocal_,
795 +                              Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
796  
797      // scatter/gather pot_row into the members of my column
798            
799      AtomPlanPotRow->scatter(pot_row, pot_temp);
800 +    AtomPlanPotRow->scatter(expot_row, expot_temp);
801  
802 <    for (int ii = 0;  ii < pot_temp.size(); ii++ )
802 >    for (int ii = 0;  ii < pot_temp.size(); ii++ )
803        pairwisePot += pot_temp[ii];
804 <    
804 >
805 >    for (int ii = 0;  ii < expot_temp.size(); ii++ )
806 >      excludedPot += expot_temp[ii];
807 >        
808 >    if (storageLayout_ & DataStorage::dslParticlePot) {
809 >      // This is the pairwise contribution to the particle pot.  The
810 >      // embedding contribution is added in each of the low level
811 >      // non-bonded routines.  In single processor, this is done in
812 >      // unpackInteractionData, not in collectData.
813 >      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
814 >        for (int i = 0; i < nLocal_; i++) {
815 >          // factor of two is because the total potential terms are divided
816 >          // by 2 in parallel due to row/ column scatter      
817 >          snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii);
818 >        }
819 >      }
820 >    }
821 >
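
The 2.0 factor deserves a number check: unpackInteractionData, later in this diff, banks RealType(0.5) * pot on the row side and the other half on the column side, so each scattered per-atom share is half the pair energy, and doubling restores the full v_ij that the serial particlePot accumulation would have given each atom. A toy check, no MPI:

    int main() {
      double v = 1.8;                       // pair energy v_ij
      double rowShare    = 0.5 * v;         // banked by unpackInteractionData
      double particlePot = 2.0 * rowShare;  // == v: the full pair energy
      return (particlePot == v) ? 0 : 1;    // exact: both scalings are by 2
    }
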
822      fill(pot_temp.begin(), pot_temp.end(),
823           Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
824 +    fill(expot_temp.begin(), expot_temp.end(),
825 +         Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
826        
827      AtomPlanPotColumn->scatter(pot_col, pot_temp);    
828 +    AtomPlanPotColumn->scatter(expot_col, expot_temp);    
829      
830      for (int ii = 0;  ii < pot_temp.size(); ii++ )
831        pairwisePot += pot_temp[ii];    
832 +
833 +    for (int ii = 0;  ii < expot_temp.size(); ii++ )
834 +      excludedPot += expot_temp[ii];    
835 +
836 +    if (storageLayout_ & DataStorage::dslParticlePot) {
837 +      // This is the pairwise contribution to the particle pot.  The
838 +      // embedding contribution is added in each of the low level
839 +      // non-bonded routines.  In single processor, this is done in
840 +      // unpackInteractionData, not in collectData.
841 +      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
842 +        for (int i = 0; i < nLocal_; i++) {
843 +          // factor of two is because the total potential terms are divided
844 +          // by 2 in parallel due to row/ column scatter      
845 +          snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii);
846 +        }
847 +      }
848 +    }
849      
850 +    if (storageLayout_ & DataStorage::dslParticlePot) {
851 +      int npp = snap_->atomData.particlePot.size();
852 +      vector<RealType> ppot_temp(npp, 0.0);
853 +
854 +      // This is the direct or embedding contribution to the particle
855 +      // pot.
856 +      
857 +      AtomPlanRealRow->scatter(atomRowData.particlePot, ppot_temp);
858 +      for (int i = 0; i < npp; i++) {
859 +        snap_->atomData.particlePot[i] += ppot_temp[i];
860 +      }
861 +
862 +      fill(ppot_temp.begin(), ppot_temp.end(), 0.0);
863 +      
864 +      AtomPlanRealColumn->scatter(atomColData.particlePot, ppot_temp);
865 +      for (int i = 0; i < npp; i++) {
866 +        snap_->atomData.particlePot[i] += ppot_temp[i];
867 +      }
868 +    }
869 +
870      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
871        RealType ploc1 = pairwisePot[ii];
872        RealType ploc2 = 0.0;
# Line 699 | Line 874 | namespace OpenMD {
874        pairwisePot[ii] = ploc2;
875      }
876  
877 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
878 +      RealType ploc1 = excludedPot[ii];
879 +      RealType ploc2 = 0.0;
880 +      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
881 +      excludedPot[ii] = ploc2;
882 +    }
883 +
884 +    // Here be dragons.
885 +    MPI::Intracomm col = colComm.getComm();
886 +
887 +    col.Allreduce(MPI::IN_PLACE,
888 +                  &snap_->frameData.conductiveHeatFlux[0], 3,
889 +                  MPI::REALTYPE, MPI::SUM);
890 +
891 +
892   #endif
893  
894    }
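
The per-family reductions above stage each element through a ploc1/ploc2 pair, while the conductiveHeatFlux call reduces its buffer directly over the column communicator with MPI::IN_PLACE. A standalone sketch of the in-place idiom using the C API (the C++ bindings used in this file were deprecated in MPI-2.2 and removed in MPI-3):

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      double pot[4] = {1.0, 2.0, 3.0, 4.0};  // one slot per interaction family
      // every rank ends with the element-wise global sum, no ploc1/ploc2 pair:
      MPI_Allreduce(MPI_IN_PLACE, pot, 4, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
      MPI_Finalize();
      return 0;
    }
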
895  
896 +  /**
897 +   * Collects information obtained during the post-pair (and embedding
898 +   * functional) loops onto local data structures.
899 +   */
900 +  void ForceMatrixDecomposition::collectSelfData() {
901 +    snap_ = sman_->getCurrentSnapshot();
902 +    storageLayout_ = sman_->getStorageLayout();
903 +
904 + #ifdef IS_MPI
905 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
906 +      RealType ploc1 = embeddingPot[ii];
907 +      RealType ploc2 = 0.0;
908 +      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
909 +      embeddingPot[ii] = ploc2;
910 +    }    
911 +    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
912 +      RealType ploc1 = excludedSelfPot[ii];
913 +      RealType ploc2 = 0.0;
914 +      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
915 +      excludedSelfPot[ii] = ploc2;
916 +    }    
917 + #endif
918 +    
919 +  }
920 +
921 +
922 +
923    int ForceMatrixDecomposition::getNAtomsInRow() {  
924   #ifdef IS_MPI
925      return nAtomsInRow_;
# Line 743 | Line 960 | namespace OpenMD {
960      return d;    
961    }
962  
963 +  Vector3d ForceMatrixDecomposition::getGroupVelocityColumn(int cg2){
964 + #ifdef IS_MPI
965 +    return cgColData.velocity[cg2];
966 + #else
967 +    return snap_->cgData.velocity[cg2];
968 + #endif
969 +  }
970  
971 +  Vector3d ForceMatrixDecomposition::getAtomVelocityColumn(int atom2){
972 + #ifdef IS_MPI
973 +    return atomColData.velocity[atom2];
974 + #else
975 +    return snap_->atomData.velocity[atom2];
976 + #endif
977 +  }
978 +
979 +
980    Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){
981  
982      Vector3d d;
# Line 809 | Line 1042 | namespace OpenMD {
1042     * We need to exclude some overcounted interactions that result from
1043     * the parallel decomposition.
1044     */
1045 <  bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) {
1046 <    int unique_id_1, unique_id_2;
1047 <    
1045 >  bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2, int cg1, int cg2) {
1046 >    int unique_id_1, unique_id_2, group1, group2;
1047 >        
1048   #ifdef IS_MPI
1049      // in MPI, we have to look up the unique IDs for each atom
1050      unique_id_1 = AtomRowToGlobal[atom1];
1051      unique_id_2 = AtomColToGlobal[atom2];
1052 +    group1 = cgRowToGlobal[cg1];
1053 +    group2 = cgColToGlobal[cg2];
1054 + #else
1055 +    unique_id_1 = AtomLocalToGlobal[atom1];
1056 +    unique_id_2 = AtomLocalToGlobal[atom2];
1057 +    group1 = cgLocalToGlobal[cg1];
1058 +    group2 = cgLocalToGlobal[cg2];
1059 + #endif  
1060  
820    // this situation should only arise in MPI simulations
1061      if (unique_id_1 == unique_id_2) return true;
1062 <    
1062 >
1063 > #ifdef IS_MPI
1064      // this prevents us from doing the pair on multiple processors
1065      if (unique_id_1 < unique_id_2) {
1066        if ((unique_id_1 + unique_id_2) % 2 == 0) return true;
1067      } else {
1068 <      if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
1068 >      if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
1069      }
1070 + #endif    
1071 +
1072 + #ifndef IS_MPI
1073 +    if (group1 == group2) {
1074 +      if (unique_id_1 < unique_id_2) return true;
1075 +    }
1076   #endif
1077 +    
1078      return false;
1079    }
1080  
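
The parity test is what divides pair work across the processor grid: every global pair (a, b) is seen on two processors, once as the row/column ordering (a, b) and once as (b, a), and the even/odd rule keeps exactly one of the two. In the serial branch, the new group1 == group2 check plays the analogous role, keeping each intra-group atom pair once. A worked check of the parity rule:

    #include <cassert>

    bool skip(int id1, int id2) {   // mirrors the diff's conditionals
      if (id1 == id2) return true;
      if (id1 < id2)  return (id1 + id2) % 2 == 0;
      else            return (id1 + id2) % 2 == 1;
    }

    int main() {
      // (3, 7): sum 10 is even, so the ascending ordering is skipped and
      // the processor holding (7, 3) does the work.
      assert(  skip(3, 7) && !skip(7, 3) );
      // (3, 8): sum 11 is odd, so the ascending ordering does the work.
      assert( !skip(3, 8) &&  skip(8, 3) );
      return 0;
    }
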
# Line 840 | Line 1088 | namespace OpenMD {
1088     * field) must still be handled for these pairs.
1089     */
1090    bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) {
1091 <    int unique_id_2;
1092 < #ifdef IS_MPI
1093 <    // in MPI, we have to look up the unique IDs for the row atom.
846 <    unique_id_2 = AtomColToGlobal[atom2];
847 < #else
848 <    // in the normal loop, the atom numbers are unique
849 <    unique_id_2 = atom2;
850 < #endif
1091 >
1092 >    // excludesForAtom was constructed to use row/column indices in the MPI
1093 >    // version, and to use local IDs in the non-MPI version:
1094      
1095      for (vector<int>::iterator i = excludesForAtom[atom1].begin();
1096           i != excludesForAtom[atom1].end(); ++i) {
1097 <      if ( (*i) == unique_id_2 ) return true;
1097 >      if ( (*i) == atom2 ) return true;
1098      }
1099  
1100      return false;
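
With the unique-ID lookup gone, excludeAtomPair is a plain membership scan of excludesForAtom[atom1], which now holds row/column indices under MPI and local IDs otherwise. The same test written with the standard library, as a sketch:

    #include <algorithm>
    #include <vector>

    bool isExcluded(const std::vector<int>& excludesForAtom1, int atom2) {
      return std::find(excludesForAtom1.begin(), excludesForAtom1.end(),
                       atom2) != excludesForAtom1.end();
    }
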
# Line 925 | Line 1168 | namespace OpenMD {
1168        idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
1169      }
1170  
1171 < #else
1171 >    if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1172 >      idat.flucQ1 = &(atomRowData.flucQPos[atom1]);
1173 >      idat.flucQ2 = &(atomColData.flucQPos[atom2]);
1174 >    }
1175  
1176 + #else
1177 +    
1178      idat.atypes = make_pair( atypesLocal[atom1], atypesLocal[atom2]);
931    //idat.atypes = make_pair( ff_->getAtomType(idents[atom1]),
932    //                         ff_->getAtomType(idents[atom2]) );
1179  
1180      if (storageLayout_ & DataStorage::dslAmat) {
1181        idat.A1 = &(snap_->atomData.aMat[atom1]);
# Line 970 | Line 1216 | namespace OpenMD {
1216        idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
1217        idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
1218      }
1219 +
1220 +    if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1221 +      idat.flucQ1 = &(snap_->atomData.flucQPos[atom1]);
1222 +      idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]);
1223 +    }
1224 +
1225   #endif
1226    }
1227  
1228    
1229    void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) {    
1230   #ifdef IS_MPI
1231 <    pot_row[atom1] += 0.5 *  *(idat.pot);
1232 <    pot_col[atom2] += 0.5 *  *(idat.pot);
1231 >    pot_row[atom1] += RealType(0.5) *  *(idat.pot);
1232 >    pot_col[atom2] += RealType(0.5) *  *(idat.pot);
1233 >    expot_row[atom1] += RealType(0.5) *  *(idat.excludedPot);
1234 >    expot_col[atom2] += RealType(0.5) *  *(idat.excludedPot);
1235  
1236      atomRowData.force[atom1] += *(idat.f1);
1237      atomColData.force[atom2] -= *(idat.f1);
1238 +
1239 +    if (storageLayout_ & DataStorage::dslFlucQForce) {              
1240 +      atomRowData.flucQFrc[atom1] -= *(idat.dVdFQ1);
1241 +      atomColData.flucQFrc[atom2] -= *(idat.dVdFQ2);
1242 +    }
1243 +
1244 +    if (storageLayout_ & DataStorage::dslElectricField) {              
1245 +      atomRowData.electricField[atom1] += *(idat.eField1);
1246 +      atomColData.electricField[atom2] += *(idat.eField2);
1247 +    }
1248 +
1249   #else
1250      pairwisePot += *(idat.pot);
1251 +    excludedPot += *(idat.excludedPot);
1252  
1253      snap_->atomData.force[atom1] += *(idat.f1);
1254      snap_->atomData.force[atom2] -= *(idat.f1);
1255 +
1256 +    if (idat.doParticlePot) {
1257 +      // This is the pairwise contribution to the particle pot.  The
1258 +      // embedding contribution is added in each of the low level
1259 +      // non-bonded routines.  In parallel, this calculation is done
1260 +      // in collectData, not in unpackInteractionData.
1261 +      snap_->atomData.particlePot[atom1] += *(idat.vpair) * *(idat.sw);
1262 +      snap_->atomData.particlePot[atom2] += *(idat.vpair) * *(idat.sw);
1263 +    }
1264 +    
1265 +    if (storageLayout_ & DataStorage::dslFlucQForce) {              
1266 +      snap_->atomData.flucQFrc[atom1] -= *(idat.dVdFQ1);
1267 +      snap_->atomData.flucQFrc[atom2] -= *(idat.dVdFQ2);
1268 +    }
1269 +
1270 +    if (storageLayout_ & DataStorage::dslElectricField) {              
1271 +      snap_->atomData.electricField[atom1] += *(idat.eField1);
1272 +      snap_->atomData.electricField[atom2] += *(idat.eField2);
1273 +    }
1274 +
1275   #endif
1276      
1277    }
# Line 1091 | Line 1377 | namespace OpenMD {
1377          // add this cutoff group to the list of groups in this cell;
1378          cellListCol_[cellIndex].push_back(i);
1379        }
1380 +    
1381   #else
1382        for (int i = 0; i < nGroups_; i++) {
1383          rs = snap_->cgData.position[i];
# Line 1116 | Line 1403 | namespace OpenMD {
1403          // add this cutoff group to the list of groups in this cell;
1404          cellList_[cellIndex].push_back(i);
1405        }
1406 +
1407   #endif
1408  
1409        for (int m1z = 0; m1z < nCells_.z(); m1z++) {
# Line 1128 | Line 1416 | namespace OpenMD {
1416                   os != cellOffsets_.end(); ++os) {
1417                
1418                Vector3i m2v = m1v + (*os);
1419 <              
1419 >            
1420 >
1421                if (m2v.x() >= nCells_.x()) {
1422                  m2v.x() = 0;          
1423                } else if (m2v.x() < 0) {
# Line 1146 | Line 1435 | namespace OpenMD {
1435                } else if (m2v.z() < 0) {
1436                  m2v.z() = nCells_.z() - 1;
1437                }
1438 <              
1438 >
1439                int m2 = Vlinear (m2v, nCells_);
1440                
1441   #ifdef IS_MPI
# Line 1155 | Line 1444 | namespace OpenMD {
1444                  for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1445                       j2 != cellListCol_[m2].end(); ++j2) {
1446                    
1447 <                  // In parallel, we need to visit *all* pairs of row &
1448 <                  // column indicies and will truncate later on.
1447 >                  // In parallel, we need to visit *all* pairs of row
1448 >                  // & column indices and will divide labor in the
1449 >                  // force evaluation later.
1450                    dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)];
1451                    snap_->wrapVector(dr);
1452                    cuts = getGroupCutoffs( (*j1), (*j2) );
# Line 1166 | Line 1456 | namespace OpenMD {
1456                  }
1457                }
1458   #else
1169              
1459                for (vector<int>::iterator j1 = cellList_[m1].begin();
1460                     j1 != cellList_[m1].end(); ++j1) {
1461                  for (vector<int>::iterator j2 = cellList_[m2].begin();
1462                       j2 != cellList_[m2].end(); ++j2) {
1463 <                  
1463 >    
1464                    // Always do this if we're in different cells or if
1465 <                  // we're in the same cell and the global index of the
1466 <                  // j2 cutoff group is less than the j1 cutoff group
1467 <                  
1468 <                  if (m2 != m1 || (*j2) < (*j1)) {
1465 >                  // we're in the same cell and the global index of
1466 >                  // the j2 cutoff group is greater than or equal to
1467 >                  // the j1 cutoff group.  Note that Rapaport's code
1468 >                  // has a "less than" conditional here, but that
1469 >                  // deals with atom-by-atom computation.  OpenMD
1470 >                  // allows atoms within a single cutoff group to
1471 >                  // interact with each other.
1472 >
1473 >
1474 >
1475 >                  if (m2 != m1 || (*j2) >= (*j1) ) {
1476 >
1477                      dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
1478                      snap_->wrapVector(dr);
1479                      cuts = getGroupCutoffs( (*j1), (*j2) );
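
The "j2 >= j1" rule with equality is the serial counterpart of keeping the (0, 0, 0) cell offset in parallel: self pairs of cutoff groups must be visited so that atoms within one group can interact, and skipAtomPair then prunes the intra-group double counting. A tiny enumeration sketch:

    #include <cstdio>

    int main() {
      for (int j1 = 0; j1 < 3; j1++)
        for (int j2 = j1; j2 < 3; j2++)
          std::printf("(%d,%d) ", j1, j2);  // (0,0) (0,1) (0,2) (1,1) (1,2) (2,2)
      return 0;
    }
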
# Line 1195 | Line 1492 | namespace OpenMD {
1492        // branch to do all cutoff group pairs
1493   #ifdef IS_MPI
1494        for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1495 <        for (int j2 = 0; j2 < nGroupsInCol_; j2++) {      
1495 >        for (int j2 = 0; j2 < nGroupsInCol_; j2++) {    
1496            dr = cgColData.position[j2] - cgRowData.position[j1];
1497            snap_->wrapVector(dr);
1498            cuts = getGroupCutoffs( j1, j2 );
# Line 1203 | Line 1500 | namespace OpenMD {
1500              neighborList.push_back(make_pair(j1, j2));
1501            }
1502          }
1503 <      }
1503 >      }      
1504   #else
1505 <      for (int j1 = 0; j1 < nGroups_ - 1; j1++) {
1506 <        for (int j2 = j1 + 1; j2 < nGroups_; j2++) {
1505 >      // include all groups here.
1506 >      for (int j1 = 0; j1 < nGroups_; j1++) {
1507 >        // include self group interactions j2 == j1
1508 >        for (int j2 = j1; j2 < nGroups_; j2++) {
1509            dr = snap_->cgData.position[j2] - snap_->cgData.position[j1];
1510            snap_->wrapVector(dr);
1511            cuts = getGroupCutoffs( j1, j2 );
1512            if (dr.lengthSquare() < cuts.third) {
1513              neighborList.push_back(make_pair(j1, j2));
1514            }
1515 <        }
1516 <      }        
1515 >        }    
1516 >      }
1517   #endif
1518      }
1519        

Diff Legend

  Removed lines
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)