  36     * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
  37     * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
  38     * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
  39 <   * [4] Vardeman & Gezelter, in progress (2009).
  39 >   * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010).
  40 >   * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
  41     */
  42
  43     /**
61 |
|
#include "io/ForceFieldOptions.hpp" |
62 |
|
#include "UseTheForce/ForceField.hpp" |
63 |
|
#include "nonbonded/SwitchingFunction.hpp" |
64 |
+ |
#ifdef IS_MPI |
65 |
+ |
#include <mpi.h> |
66 |
+ |
#endif |
67 |
|
|
68 |
|
using namespace std; |
69 |
|
namespace OpenMD { |
72 |
|
forceField_(ff), simParams_(simParams), |
73 |
|
ndf_(0), fdf_local(0), ndfRaw_(0), ndfTrans_(0), nZconstraint_(0), |
74 |
|
nGlobalMols_(0), nGlobalAtoms_(0), nGlobalCutoffGroups_(0), |
75 |
< |
nGlobalIntegrableObjects_(0), nGlobalRigidBodies_(0), |
75 |
> |
nGlobalIntegrableObjects_(0), nGlobalRigidBodies_(0), nGlobalFluctuatingCharges_(0), |
76 |
|
nAtoms_(0), nBonds_(0), nBends_(0), nTorsions_(0), nInversions_(0), |
77 |
|
nRigidBodies_(0), nIntegrableObjects_(0), nCutoffGroups_(0), |
78 |
< |
nConstraints_(0), sman_(NULL), topologyDone_(false), |
78 |
> |
nConstraints_(0), nFluctuatingCharges_(0), sman_(NULL), topologyDone_(false), |
79 |
|
calcBoxDipole_(false), useAtomicVirial_(true) { |
80 |
|
|
81 |
|
MoleculeStamp* molStamp; |
 225
 226
 227     void SimInfo::calcNdf() {
 228 <   int ndf_local;
 228 >   int ndf_local, nfq_local;
 229     MoleculeIterator i;
 230     vector<StuntDouble*>::iterator j;
 231 +   vector<Atom*>::iterator k;
 232 +
 233     Molecule* mol;
 234     StuntDouble* integrableObject;
 235 +   Atom* atom;
 236
 237     ndf_local = 0;
 238 +   nfq_local = 0;
 239
 240     for (mol = beginMolecule(i); mol != NULL; mol = nextMolecule(i)) {
 241     for (integrableObject = mol->beginIntegrableObject(j); integrableObject != NULL;
 250     ndf_local += 3;
 251     }
 252     }
 245 -
 253     }
 254 +   for (atom = mol->beginFluctuatingCharge(k); atom != NULL;
 255 +   atom = mol->nextFluctuatingCharge(k)) {
 256 +   if (atom->isFluctuatingCharge()) {
 257 +   nfq_local++;
 258 +   }
 259 +   }
 260     }
 261
 262     // n_constraints is local, so subtract them on each processor
 264
 265     #ifdef IS_MPI
 266     MPI_Allreduce(&ndf_local,&ndf_,1,MPI_INT,MPI_SUM, MPI_COMM_WORLD);
 267 +   MPI_Allreduce(&nfq_local,&nGlobalFluctuatingCharges_,1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 268     #else
 269     ndf_ = ndf_local;
 270 +   nGlobalFluctuatingCharges_ = nfq_local;
 271     #endif
 272
 273     // nZconstraints_ is global, as are the 3 COM translations for the
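The new code in this hunk tallies fluctuating-charge atoms on each processor (nfq_local) and then sums the per-processor tallies into nGlobalFluctuatingCharges_ with MPI_Allreduce, falling back to a direct copy in serial builds. As a minimal, self-contained sketch of that count-then-reduce pattern (illustrative names only, not the OpenMD routine itself):

    #ifdef IS_MPI
    #include <mpi.h>
    #endif

    // Sketch: combine per-rank tallies into a global total that every
    // rank agrees on.  nLocal is whatever this processor counted.
    int globalCount(int nLocal) {
      int nGlobal = nLocal;            // serial answer
    #ifdef IS_MPI
      // MPI_SUM adds the per-rank counts; Allreduce (rather than Reduce)
      // leaves the result on every rank.
      MPI_Allreduce(&nLocal, &nGlobal, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    #endif
      return nGlobal;
    }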
 709     Atom* atom;
 710     set<AtomType*> atomTypes;
 711
 712 <   for(mol = beginMolecule(mi); mol != NULL; mol = nextMolecule(mi)) {
 713 <   for(atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) {
 712 >   for(mol = beginMolecule(mi); mol != NULL; mol = nextMolecule(mi)) {
 713 >   for(atom = mol->beginAtom(ai); atom != NULL;
 714 >   atom = mol->nextAtom(ai)) {
 715     atomTypes.insert(atom->getAtomType());
 716     }
 717     }
 718 <
 718 >
 719     #ifdef IS_MPI
 720
 721     // loop over the found atom types on this processor, and add their
 722     // numerical idents to a vector:
 723 <
 723 >
 724     vector<int> foundTypes;
 725     set<AtomType*>::iterator i;
 726     for (i = atomTypes.begin(); i != atomTypes.end(); ++i)
 729     // count_local holds the number of found types on this processor
 730     int count_local = foundTypes.size();
 731
 716 -   // count holds the total number of found types on all processors
 717 -   // (some will be redundant with the ones found locally):
 718 -   int count;
 719 -   MPI::COMM_WORLD.Allreduce(&count_local, &count, 1, MPI::INT, MPI::SUM);
 720 -
 721 -   // create a vector to hold the globally found types, and resize it:
 722 -   vector<int> ftGlobal;
 723 -   ftGlobal.resize(count);
 724 -   vector<int> counts;
 725 -
 732     int nproc = MPI::COMM_WORLD.Get_size();
 733 <   counts.resize(nproc);
 734 <   vector<int> disps;
 735 <   disps.resize(nproc);
 733 >
 734 >   // we need arrays to hold the counts and displacement vectors for
 735 >   // all processors
 736 >   vector<int> counts(nproc, 0);
 737 >   vector<int> disps(nproc, 0);
 738
 739 <   // now spray out the foundTypes to all the other processors:
 739 >   // fill the counts array
 740 >   MPI::COMM_WORLD.Allgather(&count_local, 1, MPI::INT, &counts[0],
 741 >   1, MPI::INT);
 742 >
 743 >   // use the processor counts to compute the displacement array
 744 >   disps[0] = 0;
 745 >   int totalCount = counts[0];
 746 >   for (int iproc = 1; iproc < nproc; iproc++) {
 747 >   disps[iproc] = disps[iproc-1] + counts[iproc-1];
 748 >   totalCount += counts[iproc];
 749 >   }
 750 >
 751 >   // we need a (possibly redundant) set of all found types:
 752 >   vector<int> ftGlobal(totalCount);
 753
 754 +   // now spray out the foundTypes to all the other processors:
 755     MPI::COMM_WORLD.Allgatherv(&foundTypes[0], count_local, MPI::INT,
 756 <   &ftGlobal[0], &counts[0], &disps[0], MPI::INT);
 756 >   &ftGlobal[0], &counts[0], &disps[0],
 757 >   MPI::INT);
 758
 759 +   vector<int>::iterator j;
 760 +
 761     // foundIdents is a stl set, so inserting an already found ident
 762     // will have no effect.
 763     set<int> foundIdents;
 764 <   vector<int>::iterator j;
 764 >
 765     for (j = ftGlobal.begin(); j != ftGlobal.end(); ++j)
 766     foundIdents.insert((*j));
 767
 768     // now iterate over the foundIdents and get the actual atom types
 769     // that correspond to these:
 770     set<int>::iterator it;
 771 <   for (it = foundIdents.begin(); it != foundIdents.end(); ++it)
 771 >   for (it = foundIdents.begin(); it != foundIdents.end(); ++it)
 772     atomTypes.insert( forceField_->getAtomType((*it)) );
 773
 774     #endif
 775 <
 775 >
 776     return atomTypes;
 777     }
 778
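The rewritten #ifdef IS_MPI section above replaces the old Allreduce-and-resize approach with the standard variable-length gather idiom: each rank Allgathers its count, the counts are turned into a displacement (running-offset) array, and Allgatherv then concatenates every rank's ident list into one global vector. A self-contained sketch of that idiom follows; it uses the plain C MPI bindings instead of the MPI:: C++ bindings that appear in the file, and the function and variable names are illustrative only:

    #include <mpi.h>
    #include <vector>
    using namespace std;

    // Gather a variable-length list of ints from every rank onto every rank.
    vector<int> gatherAllIdents(vector<int> local) {
      int nproc;
      MPI_Comm_size(MPI_COMM_WORLD, &nproc);

      // 1. every rank announces how many entries it will contribute
      int countLocal = local.size();
      vector<int> counts(nproc, 0);
      MPI_Allgather(&countLocal, 1, MPI_INT, &counts[0], 1, MPI_INT,
                    MPI_COMM_WORLD);

      // 2. displacements are the exclusive prefix sum of the counts
      vector<int> disps(nproc, 0);
      int total = counts[0];
      for (int iproc = 1; iproc < nproc; iproc++) {
        disps[iproc] = disps[iproc-1] + counts[iproc-1];
        total += counts[iproc];
      }

      // 3. rank i's entries land at offset disps[i] in the global vector
      vector<int> global(total);
      MPI_Allgatherv(&local[0], countLocal, MPI_INT,
                     &global[0], &counts[0], &disps[0], MPI_INT,
                     MPI_COMM_WORLD);
      return global;
    }

As in the original, &v[0] assumes the vectors are non-empty; a rank with nothing to send would need a small guard.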
 791     int usesElectrostatic = 0;
 792     int usesMetallic = 0;
 793     int usesDirectional = 0;
 794 +   int usesFluctuatingCharges = 0;
 795     //loop over all of the atom types
 796     for (i = atomTypes.begin(); i != atomTypes.end(); ++i) {
 797     usesElectrostatic |= (*i)->isElectrostatic();
 798     usesMetallic |= (*i)->isMetal();
 799     usesDirectional |= (*i)->isDirectional();
 800 +   usesFluctuatingCharges |= (*i)->isFluctuatingCharge();
 801     }
 802
 803     #ifdef IS_MPI
 810
 811     temp = usesElectrostatic;
 812     MPI_Allreduce(&temp, &usesElectrostaticAtoms_, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);
 813 +
 814 +   temp = usesFluctuatingCharges;
 815 +   MPI_Allreduce(&temp, &usesFluctuatingCharges_, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);
 816     #else
 817
 818     usesDirectionalAtoms_ = usesDirectional;
 819     usesMetallicAtoms_ = usesMetallic;
 820     usesElectrostaticAtoms_ = usesElectrostatic;
 821 +   usesFluctuatingCharges_ = usesFluctuatingCharges;
 822
 823     #endif
 824
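The same serial/parallel split handles the new fluctuating-charge capability flag: each processor ORs isFluctuatingCharge() over its own atom types, and MPI_LOR then combines the per-rank flags so every rank knows whether any atom type anywhere in the simulation uses fluctuating charges. A tiny sketch of that flag reduction, with illustrative names only:

    #ifdef IS_MPI
    #include <mpi.h>
    #endif

    // Sketch: usesLocal is 0/1 for "any of my atom types use feature X".
    int anyRankUses(int usesLocal) {
      int usesGlobal = usesLocal;
    #ifdef IS_MPI
      // MPI_LOR: logical OR across ranks; every rank gets the same answer.
      MPI_Allreduce(&usesLocal, &usesGlobal, 1, MPI_INT, MPI_LOR,
                    MPI_COMM_WORLD);
    #endif
      return usesGlobal;
    }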
 890     massFactors_.clear();
 891     massFactors_.resize(getNAtoms(), 1.0);
 892
 862 -   cerr << "mfs in si = " << massFactors_.size() << "\n";
 893     for(mol = beginMolecule(mi); mol != NULL; mol = nextMolecule(mi)) {
 894     for (cg = mol->beginCutoffGroup(ci); cg != NULL;
 895     cg = mol->nextCutoffGroup(ci)) {
1211
1212     det = intTensor.determinant();
1213     sysconstants = geomCnst/(RealType)nGlobalIntegrableObjects_;
1214 <   volume = 4.0/3.0*NumericConstant::PI*pow(sysconstants,3.0/2.0)*sqrt(det);
1214 >   volume = 4.0/3.0*NumericConstant::PI*pow(sysconstants,geomCnst)*sqrt(det);
1215     return;
1216     }
1217
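In both gyrational-volume routines the only change is the exponent in the volume expression: the hard-coded 3.0/2.0 becomes the geomCnst variable already used in the sysconstants line. Read directly off the changed line (no claim is made here about the numeric value of geomCnst, which is set earlier in each function), the quantity computed is now

    volume = (4/3) * pi * (geomCnst / nGlobalIntegrableObjects_)^geomCnst * sqrt(det(intTensor))

where the old line used a fixed exponent of 3/2 in place of geomCnst.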
1227
1228     detI = intTensor.determinant();
1229     sysconstants = geomCnst/(RealType)nGlobalIntegrableObjects_;
1230 <   volume = 4.0/3.0*NumericConstant::PI*pow(sysconstants,3.0/2.0)*sqrt(detI);
1230 >   volume = 4.0/3.0*NumericConstant::PI*pow(sysconstants,geomCnst)*sqrt(detI);
1231     return;
1232     }
1233     /*