root/OpenMD/trunk/src/parallel/ForceMatrixDecomposition.cpp

Comparing trunk/src/parallel/ForceMatrixDecomposition.cpp (file contents):
Revision 1969 by gezelter, Wed Feb 26 14:14:50 2014 UTC vs.
Revision 2071 by gezelter, Sat Mar 7 21:41:51 2015 UTC

# Line 50 | Line 50 | namespace OpenMD {
50  
51    ForceMatrixDecomposition::ForceMatrixDecomposition(SimInfo* info, InteractionManager* iMan) : ForceDecomposition(info, iMan) {
52  
53 <    // In a parallel computation, row and column scans must visit all
54 <    // surrounding cells (not just the 14 upper triangular blocks that
55 <    // are used when the processor can see all pairs)
56 < #ifdef IS_MPI
53 >    // Row and column scans must visit all surrounding cells
54      cellOffsets_.clear();
55      cellOffsets_.push_back( Vector3i(-1,-1,-1) );
56      cellOffsets_.push_back( Vector3i( 0,-1,-1) );
# Line 82 | Line 79 | namespace OpenMD {
79      cellOffsets_.push_back( Vector3i(-1, 1, 1) );
80      cellOffsets_.push_back( Vector3i( 0, 1, 1) );
81      cellOffsets_.push_back( Vector3i( 1, 1, 1) );
85 #endif    
82    }
83  
84  
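
The change above removes the IS_MPI guard, so every build now scans all surrounding cells instead of only the 14 upper-triangular blocks used by the serial half-shell scheme. Assuming the entries elided from the middle of the hunk simply continue the same pattern (every offset from (-1,-1,-1) through (1,1,1), including the central cell), the explicit list is equivalent to this sketch:

      // Sketch only: enumerate all 27 cell offsets (26 neighbors plus
      // the central cell), in the same x-fastest order as the explicit list.
      cellOffsets_.clear();
      for (int k = -1; k <= 1; ++k)
        for (int j = -1; j <= 1; ++j)
          for (int i = -1; i <= 1; ++i)
            cellOffsets_.push_back( Vector3i(i, j, k) );
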
# Line 306 | Line 302 | namespace OpenMD {
302            groupList_[i].push_back(j);
303          }
304        }      
305 <    }
310 <
311 <
312 <    createGtypeCutoffMap();
313 <
305 >    }    
306    }
315  
316  void ForceMatrixDecomposition::createGtypeCutoffMap() {
307      
318    GrCut.clear();
319    GrCutSq.clear();
320    GrlistSq.clear();
321
322    RealType tol = 1e-6;
323    largestRcut_ = 0.0;
324    int atid;
325    set<AtomType*> atypes = info_->getSimulatedAtomTypes();
326    
327    map<int, RealType> atypeCutoff;
328      
329    for (set<AtomType*>::iterator at = atypes.begin();
330         at != atypes.end(); ++at){
331      atid = (*at)->getIdent();
332      if (userChoseCutoff_)
333        atypeCutoff[atid] = userCutoff_;
334      else
335        atypeCutoff[atid] = interactionMan_->getSuggestedCutoffRadius(*at);
336    }
337    
338    vector<RealType> gTypeCutoffs;
339    // first we do a single loop over the cutoff groups to find the
340    // largest cutoff for any atypes present in this group.
341 #ifdef IS_MPI
342    vector<RealType> groupCutoffRow(nGroupsInRow_, 0.0);
343    groupRowToGtype.resize(nGroupsInRow_);
344    for (int cg1 = 0; cg1 < nGroupsInRow_; cg1++) {
345      vector<int> atomListRow = getAtomsInGroupRow(cg1);
346      for (vector<int>::iterator ia = atomListRow.begin();
347           ia != atomListRow.end(); ++ia) {            
348        int atom1 = (*ia);
349        atid = identsRow[atom1];
350        if (atypeCutoff[atid] > groupCutoffRow[cg1]) {
351          groupCutoffRow[cg1] = atypeCutoff[atid];
352        }
353      }
354
355      bool gTypeFound = false;
356      for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
357        if (abs(groupCutoffRow[cg1] - gTypeCutoffs[gt]) < tol) {
358          groupRowToGtype[cg1] = gt;
359          gTypeFound = true;
360        }
361      }
362      if (!gTypeFound) {
363        gTypeCutoffs.push_back( groupCutoffRow[cg1] );
364        groupRowToGtype[cg1] = gTypeCutoffs.size() - 1;
365      }
366      
367    }
368    vector<RealType> groupCutoffCol(nGroupsInCol_, 0.0);
369    groupColToGtype.resize(nGroupsInCol_);
370    for (int cg2 = 0; cg2 < nGroupsInCol_; cg2++) {
371      vector<int> atomListCol = getAtomsInGroupColumn(cg2);
372      for (vector<int>::iterator jb = atomListCol.begin();
373           jb != atomListCol.end(); ++jb) {            
374        int atom2 = (*jb);
375        atid = identsCol[atom2];
376        if (atypeCutoff[atid] > groupCutoffCol[cg2]) {
377          groupCutoffCol[cg2] = atypeCutoff[atid];
378        }
379      }
380      bool gTypeFound = false;
381      for (int gt = 0; gt < gTypeCutoffs.size(); gt++) {
382        if (abs(groupCutoffCol[cg2] - gTypeCutoffs[gt]) < tol) {
383          groupColToGtype[cg2] = gt;
384          gTypeFound = true;
385        }
386      }
387      if (!gTypeFound) {
388        gTypeCutoffs.push_back( groupCutoffCol[cg2] );
389        groupColToGtype[cg2] = gTypeCutoffs.size() - 1;
390      }
391    }
392 #else
393
394    vector<RealType> groupCutoff(nGroups_, 0.0);
395    groupToGtype.resize(nGroups_);
396    for (int cg1 = 0; cg1 < nGroups_; cg1++) {
397      groupCutoff[cg1] = 0.0;
398      vector<int> atomList = getAtomsInGroupRow(cg1);
399      for (vector<int>::iterator ia = atomList.begin();
400           ia != atomList.end(); ++ia) {            
401        int atom1 = (*ia);
402        atid = idents[atom1];
403        if (atypeCutoff[atid] > groupCutoff[cg1])
404          groupCutoff[cg1] = atypeCutoff[atid];
405      }
406      
407      bool gTypeFound = false;
408      for (unsigned int gt = 0; gt < gTypeCutoffs.size(); gt++) {
409        if (abs(groupCutoff[cg1] - gTypeCutoffs[gt]) < tol) {
410          groupToGtype[cg1] = gt;
411          gTypeFound = true;
412        }
413      }
414      if (!gTypeFound) {      
415        gTypeCutoffs.push_back( groupCutoff[cg1] );
416        groupToGtype[cg1] = gTypeCutoffs.size() - 1;
417      }      
418    }
419 #endif
420
421    // Now we find the maximum group cutoff value present in the simulation
422
423    RealType groupMax = *max_element(gTypeCutoffs.begin(),
424                                     gTypeCutoffs.end());
425
426 #ifdef IS_MPI
427    MPI_Allreduce(&groupMax, &groupMax, 1, MPI_REALTYPE,
428                  MPI_MAX, MPI_COMM_WORLD);
429 #endif
430    
431    RealType tradRcut = groupMax;
432
433    GrCut.resize( gTypeCutoffs.size() );
434    GrCutSq.resize( gTypeCutoffs.size() );
435    GrlistSq.resize( gTypeCutoffs.size() );
436
437
438    for (unsigned int i = 0; i < gTypeCutoffs.size();  i++) {
439      GrCut[i].resize( gTypeCutoffs.size() , 0.0);
440      GrCutSq[i].resize( gTypeCutoffs.size(), 0.0 );
441      GrlistSq[i].resize( gTypeCutoffs.size(), 0.0 );
442
443      for (unsigned int j = 0; j < gTypeCutoffs.size();  j++) {      
444        RealType thisRcut;
445        switch(cutoffPolicy_) {
446        case TRADITIONAL:
447          thisRcut = tradRcut;
448          break;
449        case MIX:
450          thisRcut = 0.5 * (gTypeCutoffs[i] + gTypeCutoffs[j]);
451          break;
452        case MAX:
453          thisRcut = max(gTypeCutoffs[i], gTypeCutoffs[j]);
454          break;
455        default:
456          sprintf(painCave.errMsg,
457                  "ForceMatrixDecomposition::createGtypeCutoffMap "
458                  "hit an unknown cutoff policy!\n");
459          painCave.severity = OPENMD_ERROR;
460          painCave.isFatal = 1;
461          simError();
462          break;
463        }
464
465        GrCut[i][j] = thisRcut;
466        if (thisRcut > largestRcut_) largestRcut_ = thisRcut;
467        GrCutSq[i][j] = thisRcut * thisRcut;
468        GrlistSq[i][j] = pow(thisRcut + skinThickness_, 2);
469
470        // pair<int,int> key = make_pair(i,j);
471        // gTypeCutoffMap[key].first = thisRcut;
472        // gTypeCutoffMap[key].third = pow(thisRcut + skinThickness_, 2);
473        // sanity check
474        
475        if (userChoseCutoff_) {
476          if (abs(GrCut[i][j] - userCutoff_) > 0.0001) {
477            sprintf(painCave.errMsg,
478                    "ForceMatrixDecomposition::createGtypeCutoffMap "
479                    "user-specified rCut (%lf) does not match computed group Cutoff\n", userCutoff_);
480            painCave.severity = OPENMD_ERROR;
481            painCave.isFatal = 1;
482            simError();            
483          }
484        }
485      }
486    }
487  }
488
489  void ForceMatrixDecomposition::getGroupCutoffs(int &cg1, int &cg2, RealType &rcut, RealType &rcutsq, RealType &rlistsq) {
490    int i, j;  
491 #ifdef IS_MPI
492    i = groupRowToGtype[cg1];
493    j = groupColToGtype[cg2];
494 #else
495    i = groupToGtype[cg1];
496    j = groupToGtype[cg2];
497 #endif    
498    rcut = GrCut[i][j];
499    rcutsq = GrCutSq[i][j];
500    rlistsq = GrlistSq[i][j];
501    return;
502    //return gTypeCutoffMap[make_pair(i,j)];
503  }
504
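
The createGtypeCutoffMap and getGroupCutoffs machinery removed here assigned each cutoff group a group type i with cutoff r_i (the largest atomic cutoff among its member atoms) and then filled the pair tables according to cutoffPolicy_. Summarizing the switch statement above:

      r_{ij} = \begin{cases}
                 r_{\max}                & \text{TRADITIONAL (largest cutoff in the system)} \\
                 \tfrac{1}{2}(r_i + r_j) & \text{MIX} \\
                 \max(r_i, r_j)          & \text{MAX}
               \end{cases}

      \mathrm{GrCut}[i][j] = r_{ij}, \quad
      \mathrm{GrCutSq}[i][j] = r_{ij}^{2}, \quad
      \mathrm{GrlistSq}[i][j] = (r_{ij} + s)^{2}, \text{ where } s = \textit{skinThickness\_}

In revision 2071 these per-pair tables are gone; buildNeighborList below tests against a single rListSq_ instead.
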
308    int ForceMatrixDecomposition::getTopologicalDistance(int atom1, int atom2) {
309      for (unsigned int j = 0; j < toposForAtom[atom1].size(); j++) {
310        if (toposForAtom[atom1][j] == atom2)
# Line 586 | Line 389 | namespace OpenMD {
389             atomColData.electricField.end(), V3Zero);
390      }
391  
392 +    if (storageLayout_ & DataStorage::dslSitePotential) {    
393 +      fill(atomRowData.sitePotential.begin(),
394 +           atomRowData.sitePotential.end(), 0.0);
395 +      fill(atomColData.sitePotential.begin(),
396 +           atomColData.sitePotential.end(), 0.0);
397 +    }
398 +
399   #endif
400      // even in parallel, we need to zero out the local arrays:
401  
# Line 618 | Line 428 | namespace OpenMD {
428        fill(snap_->atomData.electricField.begin(),
429             snap_->atomData.electricField.end(), V3Zero);
430      }
431 +    if (storageLayout_ & DataStorage::dslSitePotential) {      
432 +      fill(snap_->atomData.sitePotential.begin(),
433 +           snap_->atomData.sitePotential.end(), 0.0);
434 +    }
435    }
436  
437  
438    void ForceMatrixDecomposition::distributeData()  {
439 +  
440 + #ifdef IS_MPI
441 +
442      snap_ = sman_->getCurrentSnapshot();
443      storageLayout_ = sman_->getStorageLayout();
627 #ifdef IS_MPI
444      
445 +    bool needsCG = true;
446 +    if(info_->getNCutoffGroups() != info_->getNAtoms())
447 +      needsCG = false;
448 +
449      // gather up the atomic positions
450      AtomPlanVectorRow->gather(snap_->atomData.position,
451                                atomRowData.position);
# Line 634 | Line 454 | namespace OpenMD {
454      
455      // gather up the cutoff group positions
456  
457 <    cgPlanVectorRow->gather(snap_->cgData.position,
458 <                            cgRowData.position);
457 >    if (needsCG) {
458 >      cgPlanVectorRow->gather(snap_->cgData.position,
459 >                              cgRowData.position);
460 >      
461 >      cgPlanVectorColumn->gather(snap_->cgData.position,
462 >                                 cgColData.position);
463 >    }
464  
640    cgPlanVectorColumn->gather(snap_->cgData.position,
641                               cgColData.position);
465  
643
644
466      if (needVelocities_) {
467        // gather up the atomic velocities
468        AtomPlanVectorColumn->gather(snap_->atomData.velocity,
469                                     atomColData.velocity);
470 <      
471 <      cgPlanVectorColumn->gather(snap_->cgData.velocity,
472 <                                 cgColData.velocity);
470 >
471 >      if (needsCG) {        
472 >        cgPlanVectorColumn->gather(snap_->cgData.velocity,
473 >                                   cgColData.velocity);
474 >      }
475      }
476  
477      
# Line 690 | Line 513 | namespace OpenMD {
513     * data structures.
514     */
515    void ForceMatrixDecomposition::collectIntermediateData() {
516 + #ifdef IS_MPI
517 +
518      snap_ = sman_->getCurrentSnapshot();
519      storageLayout_ = sman_->getStorageLayout();
520 < #ifdef IS_MPI
696 <    
520 >
521      if (storageLayout_ & DataStorage::dslDensity) {
522        
523        AtomPlanRealRow->scatter(atomRowData.density,
# Line 728 | Line 552 | namespace OpenMD {
552     * row and column-indexed data structures
553     */
554    void ForceMatrixDecomposition::distributeIntermediateData() {
555 + #ifdef IS_MPI
556      snap_ = sman_->getCurrentSnapshot();
557      storageLayout_ = sman_->getStorageLayout();
558 < #ifdef IS_MPI
558 >
559      if (storageLayout_ & DataStorage::dslFunctional) {
560        AtomPlanRealRow->gather(snap_->atomData.functional,
561                                atomRowData.functional);
# Line 749 | Line 574 | namespace OpenMD {
574    
575    
576    void ForceMatrixDecomposition::collectData() {
577 + #ifdef IS_MPI
578      snap_ = sman_->getCurrentSnapshot();
579      storageLayout_ = sman_->getStorageLayout();
580 < #ifdef IS_MPI    
580 >
581      int n = snap_->atomData.force.size();
582      vector<Vector3d> frc_tmp(n, V3Zero);
583      
# Line 832 | Line 658 | namespace OpenMD {
658          snap_->atomData.electricField[i] += efield_tmp[i];
659      }
660  
661 +    if (storageLayout_ & DataStorage::dslSitePotential) {
662  
663 +      int nsp = snap_->atomData.sitePotential.size();
664 +      vector<RealType> sp_tmp(nsp, 0.0);
665 +
666 +      AtomPlanRealRow->scatter(atomRowData.sitePotential, sp_tmp);
667 +      for (int i = 0; i < nsp; i++) {
668 +        snap_->atomData.sitePotential[i] += sp_tmp[i];
669 +        sp_tmp[i] = 0.0;
670 +      }
671 +      
672 +      AtomPlanRealColumn->scatter(atomColData.sitePotential, sp_tmp);
673 +      for (int i = 0; i < nsp; i++)
674 +        snap_->atomData.sitePotential[i] += sp_tmp[i];
675 +    }
676 +
677      nLocal_ = snap_->getNumberOfAtoms();
678  
679      vector<potVec> pot_temp(nLocal_,
# Line 845 | Line 686 | namespace OpenMD {
686      AtomPlanPotRow->scatter(pot_row, pot_temp);
687      AtomPlanPotRow->scatter(expot_row, expot_temp);
688  
689 <    for (int ii = 0;  ii < pot_temp.size(); ii++ )
689 >    for (std::size_t ii = 0;  ii < pot_temp.size(); ii++ )
690        pairwisePot += pot_temp[ii];
691  
692 <    for (int ii = 0;  ii < expot_temp.size(); ii++ )
692 >    for (std::size_t ii = 0;  ii < expot_temp.size(); ii++ )
693        excludedPot += expot_temp[ii];
694 <        
694 >    
695      if (storageLayout_ & DataStorage::dslParticlePot) {
696        // This is the pairwise contribution to the particle pot.  The
697        // embedding contribution is added in each of the low level
# Line 873 | Line 714 | namespace OpenMD {
714      AtomPlanPotColumn->scatter(pot_col, pot_temp);    
715      AtomPlanPotColumn->scatter(expot_col, expot_temp);    
716      
717 <    for (int ii = 0;  ii < pot_temp.size(); ii++ )
717 >    for (std::size_t ii = 0;  ii < pot_temp.size(); ii++ )
718        pairwisePot += pot_temp[ii];    
719  
720 <    for (int ii = 0;  ii < expot_temp.size(); ii++ )
720 >    for (std::size_t ii = 0;  ii < expot_temp.size(); ii++ )
721        excludedPot += expot_temp[ii];    
722 <
722 >    
723      if (storageLayout_ & DataStorage::dslParticlePot) {
724        // This is the pairwise contribution to the particle pot.  The
725        // embedding contribution is added in each of the low level
# Line 944 | Line 785 | namespace OpenMD {
785     * functional) loops onto local data structures.
786     */
787    void ForceMatrixDecomposition::collectSelfData() {
788 +
789 + #ifdef IS_MPI
790      snap_ = sman_->getCurrentSnapshot();
791      storageLayout_ = sman_->getStorageLayout();
792  
950 #ifdef IS_MPI
793      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
794        RealType ploc1 = embeddingPot[ii];
795        RealType ploc2 = 0.0;
# Line 963 | Line 805 | namespace OpenMD {
805   #endif
806      
807    }
966
967
808  
809    int& ForceMatrixDecomposition::getNAtomsInRow() {  
810   #ifdef IS_MPI
# Line 993 | Line 833 | namespace OpenMD {
833   #endif
834    }
835    
836 <  Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1, int cg2){
836 >  Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1,
837 >                                                         int cg2){
838 >
839      Vector3d d;
998    
840   #ifdef IS_MPI
841      d = cgColData.position[cg2] - cgRowData.position[cg1];
842   #else
# Line 1025 | Line 866 | namespace OpenMD {
866    }
867  
868  
869 <  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){
870 <
869 >  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1,
870 >                                                             int cg1) {
871      Vector3d d;
872      
873   #ifdef IS_MPI
# Line 1040 | Line 881 | namespace OpenMD {
881      return d;    
882    }
883    
884 <  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2, int cg2){
884 >  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2,
885 >                                                                int cg2) {
886      Vector3d d;
887      
888   #ifdef IS_MPI
# Line 1071 | Line 913 | namespace OpenMD {
913  
914    }
915      
916 <  Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1, int atom2){
916 >  Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1,
917 >                                                          int atom2){
918      Vector3d d;
919      
920   #ifdef IS_MPI
# Line 1093 | Line 936 | namespace OpenMD {
936     * We need to exclude some overcounted interactions that result from
937     * the parallel decomposition.
938     */
939 <  bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2, int cg1, int cg2) {
939 >  bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2,
940 >                                              int cg1, int cg2) {
941      int unique_id_1, unique_id_2;
942          
943   #ifdef IS_MPI
# Line 1170 | Line 1014 | namespace OpenMD {
1014  
1015      // filling interaction blocks with pointers
1016    void ForceMatrixDecomposition::fillInteractionData(InteractionData &idat,
1017 <                                                     int atom1, int atom2) {
1017 >                                                     int atom1, int atom2,
1018 >                                                     bool newAtom1) {
1019  
1020      idat.excluded = excludeAtomPair(atom1, atom2);
1021 <  
1021 >
1022 >    if (newAtom1) {
1023 >      
1024   #ifdef IS_MPI
1025 <    //idat.atypes = make_pair( atypesRow[atom1], atypesCol[atom2]);
1026 <    idat.atid1 = identsRow[atom1];
1025 >      idat.atid1 = identsRow[atom1];
1026 >      idat.atid2 = identsCol[atom2];
1027 >      
1028 >      if (regionsRow[atom1] >= 0 && regionsCol[atom2] >= 0) {
1029 >        idat.sameRegion = (regionsRow[atom1] == regionsCol[atom2]);
1030 >      } else {
1031 >        idat.sameRegion = false;
1032 >      }
1033 >      
1034 >      if (storageLayout_ & DataStorage::dslAmat) {
1035 >        idat.A1 = &(atomRowData.aMat[atom1]);
1036 >        idat.A2 = &(atomColData.aMat[atom2]);
1037 >      }
1038 >      
1039 >      if (storageLayout_ & DataStorage::dslTorque) {
1040 >        idat.t1 = &(atomRowData.torque[atom1]);
1041 >        idat.t2 = &(atomColData.torque[atom2]);
1042 >      }
1043 >      
1044 >      if (storageLayout_ & DataStorage::dslDipole) {
1045 >        idat.dipole1 = &(atomRowData.dipole[atom1]);
1046 >        idat.dipole2 = &(atomColData.dipole[atom2]);
1047 >      }
1048 >      
1049 >      if (storageLayout_ & DataStorage::dslQuadrupole) {
1050 >        idat.quadrupole1 = &(atomRowData.quadrupole[atom1]);
1051 >        idat.quadrupole2 = &(atomColData.quadrupole[atom2]);
1052 >      }
1053 >      
1054 >      if (storageLayout_ & DataStorage::dslDensity) {
1055 >        idat.rho1 = &(atomRowData.density[atom1]);
1056 >        idat.rho2 = &(atomColData.density[atom2]);
1057 >      }
1058 >      
1059 >      if (storageLayout_ & DataStorage::dslFunctional) {
1060 >        idat.frho1 = &(atomRowData.functional[atom1]);
1061 >        idat.frho2 = &(atomColData.functional[atom2]);
1062 >      }
1063 >      
1064 >      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1065 >        idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
1066 >        idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
1067 >      }
1068 >      
1069 >      if (storageLayout_ & DataStorage::dslParticlePot) {
1070 >        idat.particlePot1 = &(atomRowData.particlePot[atom1]);
1071 >        idat.particlePot2 = &(atomColData.particlePot[atom2]);
1072 >      }
1073 >      
1074 >      if (storageLayout_ & DataStorage::dslSkippedCharge) {              
1075 >        idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
1076 >        idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
1077 >      }
1078 >      
1079 >      if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1080 >        idat.flucQ1 = &(atomRowData.flucQPos[atom1]);
1081 >        idat.flucQ2 = &(atomColData.flucQPos[atom2]);
1082 >      }
1083 >      
1084 > #else
1085 >      
1086 >      idat.atid1 = idents[atom1];
1087 >      idat.atid2 = idents[atom2];
1088 >      
1089 >      if (regions[atom1] >= 0 && regions[atom2] >= 0) {
1090 >        idat.sameRegion = (regions[atom1] == regions[atom2]);
1091 >      } else {
1092 >        idat.sameRegion = false;
1093 >      }
1094 >      
1095 >      if (storageLayout_ & DataStorage::dslAmat) {
1096 >        idat.A1 = &(snap_->atomData.aMat[atom1]);
1097 >        idat.A2 = &(snap_->atomData.aMat[atom2]);
1098 >      }
1099 >      
1100 >      if (storageLayout_ & DataStorage::dslTorque) {
1101 >        idat.t1 = &(snap_->atomData.torque[atom1]);
1102 >        idat.t2 = &(snap_->atomData.torque[atom2]);
1103 >      }
1104 >      
1105 >      if (storageLayout_ & DataStorage::dslDipole) {
1106 >        idat.dipole1 = &(snap_->atomData.dipole[atom1]);
1107 >        idat.dipole2 = &(snap_->atomData.dipole[atom2]);
1108 >      }
1109 >      
1110 >      if (storageLayout_ & DataStorage::dslQuadrupole) {
1111 >        idat.quadrupole1 = &(snap_->atomData.quadrupole[atom1]);
1112 >        idat.quadrupole2 = &(snap_->atomData.quadrupole[atom2]);
1113 >      }
1114 >      
1115 >      if (storageLayout_ & DataStorage::dslDensity) {    
1116 >        idat.rho1 = &(snap_->atomData.density[atom1]);
1117 >        idat.rho2 = &(snap_->atomData.density[atom2]);
1118 >      }
1119 >      
1120 >      if (storageLayout_ & DataStorage::dslFunctional) {
1121 >        idat.frho1 = &(snap_->atomData.functional[atom1]);
1122 >        idat.frho2 = &(snap_->atomData.functional[atom2]);
1123 >      }
1124 >      
1125 >      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1126 >        idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
1127 >        idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
1128 >      }
1129 >      
1130 >      if (storageLayout_ & DataStorage::dslParticlePot) {
1131 >        idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
1132 >        idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
1133 >      }
1134 >      
1135 >      if (storageLayout_ & DataStorage::dslSkippedCharge) {
1136 >        idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
1137 >        idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
1138 >      }
1139 >      
1140 >      if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1141 >        idat.flucQ1 = &(snap_->atomData.flucQPos[atom1]);
1142 >        idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]);
1143 >      }
1144 > #endif
1145 >      
1146 >    } else {
1147 >      // atom1 is not new, so don't bother updating properties of that atom:
1148 > #ifdef IS_MPI
1149      idat.atid2 = identsCol[atom2];
1150  
1151      if (regionsRow[atom1] >= 0 && regionsCol[atom2] >= 0) {
# Line 1186 | Line 1155 | namespace OpenMD {
1155      }
1156  
1157      if (storageLayout_ & DataStorage::dslAmat) {
1189      idat.A1 = &(atomRowData.aMat[atom1]);
1158        idat.A2 = &(atomColData.aMat[atom2]);
1159      }
1160      
1161      if (storageLayout_ & DataStorage::dslTorque) {
1194      idat.t1 = &(atomRowData.torque[atom1]);
1162        idat.t2 = &(atomColData.torque[atom2]);
1163      }
1164  
1165      if (storageLayout_ & DataStorage::dslDipole) {
1199      idat.dipole1 = &(atomRowData.dipole[atom1]);
1166        idat.dipole2 = &(atomColData.dipole[atom2]);
1167      }
1168  
1169      if (storageLayout_ & DataStorage::dslQuadrupole) {
1204      idat.quadrupole1 = &(atomRowData.quadrupole[atom1]);
1170        idat.quadrupole2 = &(atomColData.quadrupole[atom2]);
1171      }
1172  
1173      if (storageLayout_ & DataStorage::dslDensity) {
1209      idat.rho1 = &(atomRowData.density[atom1]);
1174        idat.rho2 = &(atomColData.density[atom2]);
1175      }
1176  
1177      if (storageLayout_ & DataStorage::dslFunctional) {
1214      idat.frho1 = &(atomRowData.functional[atom1]);
1178        idat.frho2 = &(atomColData.functional[atom2]);
1179      }
1180  
1181      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1219      idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
1182        idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
1183      }
1184  
1185      if (storageLayout_ & DataStorage::dslParticlePot) {
1224      idat.particlePot1 = &(atomRowData.particlePot[atom1]);
1186        idat.particlePot2 = &(atomColData.particlePot[atom2]);
1187      }
1188  
1189      if (storageLayout_ & DataStorage::dslSkippedCharge) {              
1229      idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]);
1190        idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
1191      }
1192  
1193 <    if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1234 <      idat.flucQ1 = &(atomRowData.flucQPos[atom1]);
1193 >    if (storageLayout_ & DataStorage::dslFlucQPosition) {
1194        idat.flucQ2 = &(atomColData.flucQPos[atom2]);
1195      }
1196  
1197 < #else
1239 <    
1240 <    //idat.atypes = make_pair( atypesLocal[atom1], atypesLocal[atom2]);
1241 <    idat.atid1 = idents[atom1];
1197 > #else  
1198      idat.atid2 = idents[atom2];
1199  
1200      if (regions[atom1] >= 0 && regions[atom2] >= 0) {
# Line 1248 | Line 1204 | namespace OpenMD {
1204      }
1205  
1206      if (storageLayout_ & DataStorage::dslAmat) {
1251      idat.A1 = &(snap_->atomData.aMat[atom1]);
1207        idat.A2 = &(snap_->atomData.aMat[atom2]);
1208      }
1209  
1210      if (storageLayout_ & DataStorage::dslTorque) {
1256      idat.t1 = &(snap_->atomData.torque[atom1]);
1211        idat.t2 = &(snap_->atomData.torque[atom2]);
1212      }
1213  
1214      if (storageLayout_ & DataStorage::dslDipole) {
1261      idat.dipole1 = &(snap_->atomData.dipole[atom1]);
1215        idat.dipole2 = &(snap_->atomData.dipole[atom2]);
1216      }
1217  
1218      if (storageLayout_ & DataStorage::dslQuadrupole) {
1266      idat.quadrupole1 = &(snap_->atomData.quadrupole[atom1]);
1219        idat.quadrupole2 = &(snap_->atomData.quadrupole[atom2]);
1220      }
1221  
1222      if (storageLayout_ & DataStorage::dslDensity) {    
1271      idat.rho1 = &(snap_->atomData.density[atom1]);
1223        idat.rho2 = &(snap_->atomData.density[atom2]);
1224      }
1225  
1226      if (storageLayout_ & DataStorage::dslFunctional) {
1276      idat.frho1 = &(snap_->atomData.functional[atom1]);
1227        idat.frho2 = &(snap_->atomData.functional[atom2]);
1228      }
1229  
1230      if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
1281      idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
1231        idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
1232      }
1233  
1234      if (storageLayout_ & DataStorage::dslParticlePot) {
1286      idat.particlePot1 = &(snap_->atomData.particlePot[atom1]);
1235        idat.particlePot2 = &(snap_->atomData.particlePot[atom2]);
1236      }
1237  
1238      if (storageLayout_ & DataStorage::dslSkippedCharge) {
1291      idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
1239        idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
1240      }
1241  
1242      if (storageLayout_ & DataStorage::dslFlucQPosition) {              
1296      idat.flucQ1 = &(snap_->atomData.flucQPos[atom1]);
1243        idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]);
1244      }
1245  
1246   #endif
1247 +    }
1248    }
1302
1249    
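
The new newAtom1 argument to fillInteractionData lets the caller avoid re-loading the atom1-side pointers when several neighbors of the same atom1 are processed in a row; only the atom2-side fields are refreshed on subsequent calls. A minimal sketch of that calling pattern (the loop structure, idat setup, and neighborsOf helper are purely illustrative, not the actual force-loop caller):

      // Illustrative calling pattern only (names are hypothetical):
      for (int atom1 = 0; atom1 < nAtoms; ++atom1) {
        bool newAtom1 = true;                      // first pair for this atom1
        for (int atom2 : neighborsOf(atom1)) {     // hypothetical neighbor query
          fillInteractionData(idat, atom1, atom2, newAtom1);
          newAtom1 = false;   // atom1-side pointers are already set
          // ... evaluate the pair, then unpackInteractionData(idat, atom1, atom2);
        }
      }
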
1250 <  void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) {    
1250 >  void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat,
1251 >                                                       int atom1, int atom2) {  
1252   #ifdef IS_MPI
1253      pot_row[atom1] += RealType(0.5) *  *(idat.pot);
1254      pot_col[atom2] += RealType(0.5) *  *(idat.pot);
# Line 1321 | Line 1268 | namespace OpenMD {
1268        atomColData.electricField[atom2] += *(idat.eField2);
1269      }
1270  
1271 +    if (storageLayout_ & DataStorage::dslSitePotential) {              
1272 +      atomRowData.sitePotential[atom1] += *(idat.sPot1);
1273 +      atomColData.sitePotential[atom2] += *(idat.sPot2);
1274 +    }
1275 +
1276   #else
1277      pairwisePot += *(idat.pot);
1278      excludedPot += *(idat.excludedPot);
# Line 1347 | Line 1299 | namespace OpenMD {
1299        snap_->atomData.electricField[atom2] += *(idat.eField2);
1300      }
1301  
1302 +    if (storageLayout_ & DataStorage::dslSitePotential) {              
1303 +      snap_->atomData.sitePotential[atom1] += *(idat.sPot1);
1304 +      snap_->atomData.sitePotential[atom2] += *(idat.sPot2);
1305 +    }
1306 +
1307   #endif
1308      
1309    }
# Line 1354 | Line 1311 | namespace OpenMD {
1311    /*
1312     * buildNeighborList
1313     *
1314 <   * first element of pair is row-indexed CutoffGroup
1315 <   * second element of pair is column-indexed CutoffGroup
1314 >   * Constructs the Verlet neighbor list for a force-matrix
1315 >   * decomposition.  In this case, each processor is responsible for
1316 >   * row-site interactions with column-sites.
1317 >   *
1318 >   * neighborList is returned as a packed array of neighboring
1319 >   * column-ordered CutoffGroups.  The starting position in
1320 >   * neighborList for each row-ordered CutoffGroup is given by the
1321 >   * returned vector point.
1322     */
1323 <  void ForceMatrixDecomposition::buildNeighborList(vector<pair<int,int> >& neighborList) {
1324 <    
1323 >  void ForceMatrixDecomposition::buildNeighborList(vector<int>& neighborList,
1324 >                                                   vector<int>& point) {
1325      neighborList.clear();
1326 <    groupCutoffs cuts;
1326 >    point.clear();
1327 >    int len = 0;
1328 >    
1329      bool doAllPairs = false;
1330  
1366    RealType rList_ = (largestRcut_ + skinThickness_);
1367    RealType rcut, rcutsq, rlistsq;
1331      Snapshot* snap_ = sman_->getCurrentSnapshot();
1332      Mat3x3d box;
1333      Mat3x3d invBox;
# Line 1376 | Line 1339 | namespace OpenMD {
1339   #ifdef IS_MPI
1340      cellListRow_.clear();
1341      cellListCol_.clear();
1342 +    point.resize(nGroupsInRow_+1);
1343   #else
1344      cellList_.clear();
1345 +    point.resize(nGroups_+1);
1346   #endif
1347      
1348      if (!usePeriodicBoundaryConditions_) {
# Line 1388 | Line 1353 | namespace OpenMD {
1353        invBox = snap_->getInvHmat();
1354      }
1355      
1356 <    Vector3d boxX = box.getColumn(0);
1357 <    Vector3d boxY = box.getColumn(1);
1358 <    Vector3d boxZ = box.getColumn(2);
1356 >    Vector3d A = box.getColumn(0);
1357 >    Vector3d B = box.getColumn(1);
1358 >    Vector3d C = box.getColumn(2);
1359 >
1360 >    // Required for triclinic cells
1361 >    Vector3d AxB = cross(A, B);
1362 >    Vector3d BxC = cross(B, C);
1363 >    Vector3d CxA = cross(C, A);
1364 >
1365 >    // unit vectors perpendicular to the faces of the triclinic cell:
1366 >    AxB.normalize();
1367 >    BxC.normalize();
1368 >    CxA.normalize();
1369 >
1370 >    // A set of perpendicular lengths in triclinic cells:
1371 >    RealType Wa = abs(dot(A, BxC));
1372 >    RealType Wb = abs(dot(B, CxA));
1373 >    RealType Wc = abs(dot(C, AxB));
1374      
1375 <    nCells_.x() = int( boxX.length() / rList_ );
1376 <    nCells_.y() = int( boxY.length() / rList_ );
1377 <    nCells_.z() = int( boxZ.length() / rList_ );
1375 >    nCells_.x() = int( Wa / rList_ );
1376 >    nCells_.y() = int( Wb / rList_ );
1377 >    nCells_.z() = int( Wc / rList_ );
1378      
1379      // handle small boxes where the cell offsets can end up repeating cells
1400    
1380      if (nCells_.x() < 3) doAllPairs = true;
1381      if (nCells_.y() < 3) doAllPairs = true;
1382      if (nCells_.z() < 3) doAllPairs = true;
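
Since BxC, CxA, and AxB are normalized before the dot products, the Wa, Wb, and Wc computed above are the perpendicular widths of the (possibly triclinic) cell:

      W_a = \frac{|\vec{A}\cdot(\vec{B}\times\vec{C})|}{\lVert\vec{B}\times\vec{C}\rVert}, \qquad
      W_b = \frac{|\vec{B}\cdot(\vec{C}\times\vec{A})|}{\lVert\vec{C}\times\vec{A}\rVert}, \qquad
      W_c = \frac{|\vec{C}\cdot(\vec{A}\times\vec{B})|}{\lVert\vec{A}\times\vec{B}\rVert}

Sizing the grid as nCells_.x() = \lfloor W_a / r_{\mathrm{list}} \rfloor (and likewise for y and z) keeps each cell at least rList_ wide measured perpendicular to its faces, which is what the cell list requires; the raw box-vector lengths used by the removed lines can make the cells too thin in that direction for skewed boxes.
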
# Line 1412 | Line 1391 | namespace OpenMD {
1391   #endif
1392      
1393      if (!doAllPairs) {
1394 +      
1395   #ifdef IS_MPI
1396        
1397        for (int i = 0; i < nGroupsInRow_; i++) {
# Line 1470 | Line 1450 | namespace OpenMD {
1450          // add this cutoff group to the list of groups in this cell;
1451          cellListCol_[cellIndex].push_back(i);
1452        }
1453 <      
1453 >            
1454   #else
1455        for (int i = 0; i < nGroups_; i++) {
1456          rs = snap_->cgData.position[i];
# Line 1503 | Line 1483 | namespace OpenMD {
1483  
1484   #endif
1485  
1486 <      for (int m1z = 0; m1z < nCells_.z(); m1z++) {
1487 <        for (int m1y = 0; m1y < nCells_.y(); m1y++) {
1488 <          for (int m1x = 0; m1x < nCells_.x(); m1x++) {
1489 <            Vector3i m1v(m1x, m1y, m1z);
1510 <            int m1 = Vlinear(m1v, nCells_);
1511 <            
1512 <            for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1513 <                 os != cellOffsets_.end(); ++os) {
1514 <              
1515 <              Vector3i m2v = m1v + (*os);
1516 <            
1486 > #ifdef IS_MPI
1487 >      for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1488 >        rs = cgRowData.position[j1];
1489 > #else
1490  
1491 <              if (m2v.x() >= nCells_.x()) {
1492 <                m2v.x() = 0;          
1493 <              } else if (m2v.x() < 0) {
1494 <                m2v.x() = nCells_.x() - 1;
1495 <              }
1491 >      for (int j1 = 0; j1 < nGroups_; j1++) {
1492 >        rs = snap_->cgData.position[j1];
1493 > #endif
1494 >        point[j1] = len;
1495 >        
1496 >        // scaled positions relative to the box vectors
1497 >        scaled = invBox * rs;
1498 >        
1499 >        // wrap the vector back into the unit box by subtracting integer box
1500 >        // numbers
1501 >        for (int j = 0; j < 3; j++) {
1502 >          scaled[j] -= roundMe(scaled[j]);
1503 >          scaled[j] += 0.5;
1504 >          // Handle the special case when an object is exactly on the
1505 >          // boundary (a scaled coordinate of 1.0 is the same as
1506 >          // scaled coordinate of 0.0)
1507 >          if (scaled[j] >= 1.0) scaled[j] -= 1.0;
1508 >        }
1509 >        
1510 >        // find xyz-indices of cell that cutoffGroup is in.
1511 >        whichCell.x() = nCells_.x() * scaled.x();
1512 >        whichCell.y() = nCells_.y() * scaled.y();
1513 >        whichCell.z() = nCells_.z() * scaled.z();
1514 >        
1515 >        for (vector<Vector3i>::iterator os = cellOffsets_.begin();
1516 >             os != cellOffsets_.end(); ++os) {
1517                
1518 <              if (m2v.y() >= nCells_.y()) {
1525 <                m2v.y() = 0;          
1526 <              } else if (m2v.y() < 0) {
1527 <                m2v.y() = nCells_.y() - 1;
1528 <              }
1529 <              
1530 <              if (m2v.z() >= nCells_.z()) {
1531 <                m2v.z() = 0;          
1532 <              } else if (m2v.z() < 0) {
1533 <                m2v.z() = nCells_.z() - 1;
1534 <              }
1518 >          Vector3i m2v = whichCell + (*os);
1519  
1520 <              int m2 = Vlinear (m2v, nCells_);
1521 <              
1520 >          if (m2v.x() >= nCells_.x()) {
1521 >            m2v.x() = 0;          
1522 >          } else if (m2v.x() < 0) {
1523 >            m2v.x() = nCells_.x() - 1;
1524 >          }
1525 >          
1526 >          if (m2v.y() >= nCells_.y()) {
1527 >            m2v.y() = 0;          
1528 >          } else if (m2v.y() < 0) {
1529 >            m2v.y() = nCells_.y() - 1;
1530 >          }
1531 >          
1532 >          if (m2v.z() >= nCells_.z()) {
1533 >            m2v.z() = 0;          
1534 >          } else if (m2v.z() < 0) {
1535 >            m2v.z() = nCells_.z() - 1;
1536 >          }
1537 >          int m2 = Vlinear (m2v, nCells_);                                      
1538   #ifdef IS_MPI
1539 <              for (vector<int>::iterator j1 = cellListRow_[m1].begin();
1540 <                   j1 != cellListRow_[m1].end(); ++j1) {
1541 <                for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1542 <                     j2 != cellListCol_[m2].end(); ++j2) {
1543 <                  
1544 <                  // In parallel, we need to visit *all* pairs of row
1545 <                  // & column indices and will divide labor in the
1546 <                  // force evaluation later.
1547 <                  dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)];
1548 <                  if (usePeriodicBoundaryConditions_) {
1549 <                    snap_->wrapVector(dr);
1550 <                  }
1551 <                  getGroupCutoffs( (*j1), (*j2), rcut, rcutsq, rlistsq );
1552 <                  if (dr.lengthSquare() < rlistsq) {
1553 <                    neighborList.push_back(make_pair((*j1), (*j2)));
1554 <                  }                  
1555 <                }
1556 <              }
1539 >          for (vector<int>::iterator j2 = cellListCol_[m2].begin();
1540 >               j2 != cellListCol_[m2].end(); ++j2) {
1541 >            
1542 >            // In parallel, we need to visit *all* pairs of row
1543 >            // & column indices and will divide labor in the
1544 >            // force evaluation later.
1545 >            dr = cgColData.position[(*j2)] - rs;
1546 >            if (usePeriodicBoundaryConditions_) {
1547 >              snap_->wrapVector(dr);
1548 >            }
1549 >            if (dr.lengthSquare() < rListSq_) {
1550 >              neighborList.push_back( (*j2) );
1551 >              ++len;
1552 >            }                
1553 >          }        
1554   #else
1555 <              for (vector<int>::iterator j1 = cellList_[m1].begin();
1556 <                   j1 != cellList_[m1].end(); ++j1) {
1557 <                for (vector<int>::iterator j2 = cellList_[m2].begin();
1558 <                     j2 != cellList_[m2].end(); ++j2) {
1559 <    
1560 <                  // Always do this if we're in different cells or if
1561 <                  // we're in the same cell and the global index of
1562 <                  // the j2 cutoff group is greater than or equal to
1563 <                  // the j1 cutoff group.  Note that Rappaport's code
1564 <                  // has a "less than" conditional here, but that
1565 <                  // deals with atom-by-atom computation.  OpenMD
1566 <                  // allows atoms within a single cutoff group to
1567 <                  // interact with each other.
1568 <
1569 <                  if (m2 != m1 || (*j2) >= (*j1) ) {
1570 <
1571 <                    dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
1575 <                    if (usePeriodicBoundaryConditions_) {
1576 <                      snap_->wrapVector(dr);
1577 <                    }
1578 <                    getGroupCutoffs( (*j1), (*j2), rcut, rcutsq, rlistsq );
1579 <                    if (dr.lengthSquare() < rlistsq) {
1580 <                      neighborList.push_back(make_pair((*j1), (*j2)));
1581 <                    }
1582 <                  }
1583 <                }
1555 >          for (vector<int>::iterator j2 = cellList_[m2].begin();
1556 >               j2 != cellList_[m2].end(); ++j2) {
1557 >          
1558 >            // Always do this if we're in different cells or if
1559 >            // we're in the same cell and the global index of
1560 >            // the j2 cutoff group is greater than or equal to
1561 >            // the j1 cutoff group.  Note that Rappaport's code
1562 >            // has a "less than" conditional here, but that
1563 >            // deals with atom-by-atom computation.  OpenMD
1564 >            // allows atoms within a single cutoff group to
1565 >            // interact with each other.
1566 >            
1567 >            if ( (*j2) >= j1 ) {
1568 >              
1569 >              dr = snap_->cgData.position[(*j2)] - rs;
1570 >              if (usePeriodicBoundaryConditions_) {
1571 >                snap_->wrapVector(dr);
1572                }
1573 < #endif
1573 >              if ( dr.lengthSquare() < rListSq_) {
1574 >                neighborList.push_back( (*j2) );
1575 >                ++len;
1576 >              }
1577              }
1578 <          }
1578 >          }                
1579 > #endif
1580          }
1581 <      }
1581 >      }      
1582      } else {
1583        // branch to do all cutoff group pairs
1584   #ifdef IS_MPI
1585        for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
1586 +        point[j1] = len;
1587 +        rs = cgRowData.position[j1];
1588          for (int j2 = 0; j2 < nGroupsInCol_; j2++) {    
1589 <          dr = cgColData.position[j2] - cgRowData.position[j1];
1589 >          dr = cgColData.position[j2] - rs;
1590            if (usePeriodicBoundaryConditions_) {
1591              snap_->wrapVector(dr);
1592            }
1593 <          getGroupCutoffs( j1, j2, rcut, rcutsq, rlistsq);
1594 <          if (dr.lengthSquare() < rlistsq) {
1595 <            neighborList.push_back(make_pair(j1, j2));
1593 >          if (dr.lengthSquare() < rListSq_) {
1594 >            neighborList.push_back( j2 );
1595 >            ++len;
1596            }
1597          }
1598        }      
1599   #else
1600        // include all groups here.
1601        for (int j1 = 0; j1 < nGroups_; j1++) {
1602 +        point[j1] = len;
1603 +        rs = snap_->cgData.position[j1];
1604          // include self group interactions j2 == j1
1605          for (int j2 = j1; j2 < nGroups_; j2++) {
1606 <          dr = snap_->cgData.position[j2] - snap_->cgData.position[j1];
1606 >          dr = snap_->cgData.position[j2] - rs;
1607            if (usePeriodicBoundaryConditions_) {
1608              snap_->wrapVector(dr);
1609            }
1610 <          getGroupCutoffs( j1, j2, rcut, rcutsq, rlistsq );
1611 <          if (dr.lengthSquare() < rlistsq) {
1612 <            neighborList.push_back(make_pair(j1, j2));
1610 >          if (dr.lengthSquare() < rListSq_) {
1611 >            neighborList.push_back( j2 );
1612 >            ++len;
1613            }
1614          }    
1615        }
1616   #endif
1617      }
1618 <      
1618 >
1619 > #ifdef IS_MPI
1620 >    point[nGroupsInRow_] = len;
1621 > #else
1622 >    point[nGroups_] = len;
1623 > #endif
1624 >  
1625      // save the local cutoff group positions for the check that is
1626      // done on each loop:
1627      saved_CG_positions_.clear();
1628 +    saved_CG_positions_.reserve(nGroups_);
1629      for (int i = 0; i < nGroups_; i++)
1630        saved_CG_positions_.push_back(snap_->cgData.position[i]);
1631    }
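
As the revised header comment for buildNeighborList describes, the neighbor list is now returned in a packed (CSR-style) layout: for each row-ordered cutoff group cg1, entries point[cg1] through point[cg1+1]-1 of neighborList hold its column-ordered neighbors. A minimal sketch of how a caller might walk it (decomp and the pair handling are placeholders):

      std::vector<int> neighborList, point;
      decomp->buildNeighborList(neighborList, point);  // decomp: a ForceMatrixDecomposition*

      for (int cg1 = 0; cg1 < static_cast<int>(point.size()) - 1; ++cg1) {
        for (int k = point[cg1]; k < point[cg1 + 1]; ++k) {
          int cg2 = neighborList[k];
          // ... process the (cg1, cg2) cutoff-group pair
        }
      }
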

Diff Legend

  Removed lines (present only in revision 1969)
+ Added lines (present only in revision 2071)
< Changed lines (revision 1969 version)
> Changed lines (revision 2071 version)