  35 |   | *
  36 |   | * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
  37 |   | * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
  38 | < | * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
  38 | > | * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008).
  39 |   | * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010).
  40 |   | * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
  41 |   | */
  44 |   | * @file SimCreator.cpp
  45 |   | * @author tlin
  46 |   | * @date 11/03/2004
  47 | - | * @time 13:51am
  47 |   | * @version 1.0
  48 |   | */
  49 | + |
  50 | + | #ifdef IS_MPI
  51 | + | #include "mpi.h"
  52 | + | #include "math/ParallelRandNumGen.hpp"
  53 | + | #endif
  54 | + |
  55 |   | #include <exception>
  56 |   | #include <iostream>
  57 |   | #include <sstream>
  89 |   | #include "types/FixedChargeAdapter.hpp"
  90 |   | #include "types/FluctuatingChargeAdapter.hpp"
  91 |   |
  87 | - | #ifdef IS_MPI
  88 | - | #include "mpi.h"
  89 | - | #include "math/ParallelRandNumGen.hpp"
  90 | - | #endif
  92 |   |
  93 |   | namespace OpenMD {
  94 |   |
 101 |   | #ifdef IS_MPI
 102 |   | int streamSize;
 103 |   | const int masterNode = 0;
 104 | < | int commStatus;
 104 | > |
 105 |   | if (worldRank == masterNode) {
 106 | < | commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
 106 | > | MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
 107 | > | // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
 108 |   | #endif
 109 |   | SimplePreprocessor preprocessor;
 110 | < | preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream);
 110 | > | preprocessor.preprocess(rawMetaDataStream, filename,
 111 | > | startOfMetaDataBlock, ppStream);
 112 |   |
 113 |   | #ifdef IS_MPI
 114 | < | //brocasting the stream size
 114 | > | //broadcasting the stream size
 115 |   | streamSize = ppStream.str().size() +1;
 116 | < | commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
 116 | > | MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
 117 | > | MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
 118 | > | streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
 119 |   |
 120 | < | commStatus = MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
 121 | < |
 122 | < |
 120 | > | // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
 121 | > | // MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
 122 | > | // streamSize, MPI::CHAR, masterNode);
 123 | > |
 124 |   | } else {
 125 |   |
 126 | < | commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
 126 | > | MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
 127 | > | // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
 128 |   |
 129 |   | //get stream size
 130 | < | commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
 131 | < |
 130 | > | MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
 131 | > | // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
 132 |   | char* buf = new char[streamSize];
 133 |   | assert(buf);
 134 |   |
 135 |   | //receive file content
 136 | < | commStatus = MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
 137 | < |
 136 | > | MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
 137 | > | // MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
 138 | > |
 139 |   | ppStream.str(buf);
 140 |   | delete [] buf;
 133 | - |
 141 |   | }
 142 |   | #endif
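This hunk drops the unused commStatus return value and keeps the same two-step broadcast: the master rank sends the length of the preprocessed MetaData stream, then the characters themselves, and every other rank allocates a matching receive buffer before rebuilding its own ppStream. A minimal self-contained sketch of that pattern follows; the shareString helper and the use of MPI_INT for the length are illustrative choices, not OpenMD code (the hunk above passes the int streamSize with MPI_LONG).

    #include <mpi.h>
    #include <algorithm>
    #include <string>
    #include <vector>

    // Broadcast a string held by rank 0 to every rank: length first, then bytes.
    std::string shareString(const std::string& onMaster) {
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      int streamSize = static_cast<int>(onMaster.size()) + 1;   // include the '\0'
      MPI_Bcast(&streamSize, 1, MPI_INT, 0, MPI_COMM_WORLD);

      std::vector<char> buf(streamSize, '\0');
      if (rank == 0)
        std::copy(onMaster.c_str(), onMaster.c_str() + streamSize, buf.begin());
      MPI_Bcast(&buf[0], streamSize, MPI_CHAR, 0, MPI_COMM_WORLD);

      return std::string(&buf[0]);
    }

In the code above the receiving side is the else branch: it allocates buf, receives the characters, and hands them to ppStream.str(buf).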
 143 |   | // Create a scanner that reads from the input stream
 159 |   | parser.initializeASTFactory(factory);
 160 |   | parser.setASTFactory(&factory);
 161 |   | parser.mdfile();
 155 | - |
 162 |   | // Create a tree parser that reads information into Globals
 163 |   | MDTreeParser treeParser;
 164 |   | treeParser.initializeASTFactory(factory);
 261 |   | std::string mdRawData;
 262 |   | int metaDataBlockStart = -1;
 263 |   | int metaDataBlockEnd = -1;
 264 | < | int i;
 265 | < | streamoff mdOffset(0);
 264 | > | int i, j;
 265 | > | streamoff mdOffset;
 266 |   | int mdFileVersion;
 267 |   |
 268 | + | // Create a string for embedding the version information in the MetaData
 269 | + | std::string version;
 270 | + | version.assign("## Last run using OpenMD Version: ");
 271 | + | version.append(OPENMD_VERSION_MAJOR);
 272 | + | version.append(".");
 273 | + | version.append(OPENMD_VERSION_MINOR);
 274 |   |
 275 | + | std::string svnrev;
 276 | + | //convert a macro from compiler to a string in c++
 277 | + | STR_DEFINE(svnrev, SVN_REV );
 278 | + | version.append(" Revision: ");
 279 | + | // If there's no SVN revision, just call this the RELEASE revision.
 280 | + | if (!svnrev.empty()) {
 281 | + | version.append(svnrev);
 282 | + | } else {
 283 | + | version.append("RELEASE");
 284 | + | }
 285 | + |
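The added block builds the "## Last run using OpenMD Version: ..." stamp from compile-time macros; STR_DEFINE turns the value of a preprocessor symbol into a std::string. A small stand-alone sketch of the usual stringification idiom such a macro relies on; the macro definitions, the "2.1" version text, and the 1879 stand-in revision are illustrative, not OpenMD's own definitions (those live in its configuration headers).

    #include <iostream>
    #include <string>

    // Stringify the *value* of a macro: the argument is expanded first because
    // it passes through a helper macro before the # operator is applied.
    #define STR_VALUE(arg)        #arg
    #define STR_DEFINE(str, arg)  str = STR_VALUE(arg)

    #ifndef SVN_REV
    #define SVN_REV 1879                     // stand-in revision for illustration
    #endif

    int main() {
      std::string svnrev;
      STR_DEFINE(svnrev, SVN_REV);           // svnrev == "1879"

      std::string version("## Last run using OpenMD Version: ");
      version.append("2.1");                 // stands in for the version macros
      version.append(" Revision: ");
      if (!svnrev.empty()) {
        version.append(svnrev);
      } else {
        version.append("RELEASE");
      }

      std::cout << version << std::endl;
      return 0;
    }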
 286 |   | #ifdef IS_MPI
 287 |   | const int masterNode = 0;
 288 |   | if (worldRank == masterNode) {
 377 |   |
 378 |   | mdRawData.clear();
 379 |   |
 380 | + | bool foundVersion = false;
 381 | + |
 382 |   | for (int i = 0; i < metaDataBlockEnd - metaDataBlockStart - 1; ++i) {
 383 |   | mdFile_.getline(buffer, bufferSize);
 384 | < | mdRawData += buffer;
 384 | > | std::string line = trimLeftCopy(buffer);
 385 | > | j = CaseInsensitiveFind(line, "## Last run using OpenMD Version");
 386 | > | if (static_cast<size_t>(j) != string::npos) {
 387 | > | foundVersion = true;
 388 | > | mdRawData += version;
 389 | > | } else {
 390 | > | mdRawData += buffer;
 391 | > | }
 392 |   | mdRawData += "\n";
 393 |   | }
 394 | < |
 394 | > |
 395 | > | if (!foundVersion) mdRawData += version + "\n";
 396 | > |
 397 |   | mdFile_.close();
 398 |   |
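On the master node the MetaData block is now copied line by line; any existing "## Last run using OpenMD Version" stamp is replaced by the freshly built version string, and if no such line is found one is appended after the loop. A stand-alone sketch of the same replace-or-append scan, using a plain lowercase search in place of OpenMD's trimLeftCopy / CaseInsensitiveFind helpers (stampVersion and lowerCopy are illustrative names):

    #include <algorithm>
    #include <cctype>
    #include <sstream>
    #include <string>

    // Lower-case copy used for a simple case-insensitive substring search.
    static std::string lowerCopy(std::string s) {
      std::transform(s.begin(), s.end(), s.begin(),
                     [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
      return s;
    }

    // Replace an existing version stamp in the MetaData text, or append one.
    std::string stampVersion(const std::string& rawMetaData,
                             const std::string& version) {
      const std::string key = lowerCopy("## Last run using OpenMD Version");
      std::istringstream in(rawMetaData);
      std::ostringstream out;
      std::string line;
      bool foundVersion = false;

      while (std::getline(in, line)) {
        if (lowerCopy(line).find(key) != std::string::npos) {
          foundVersion = true;
          out << version << "\n";
        } else {
          out << line << "\n";
        }
      }
      if (!foundVersion) out << version << "\n";
      return out.str();
    }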
 399 |   | #ifdef IS_MPI
 521 |   |
 522 |   | #ifdef IS_MPI
 523 |   | void SimCreator::divideMolecules(SimInfo *info) {
 490 | - | RealType numerator;
 491 | - | RealType denominator;
 492 | - | RealType precast;
 493 | - | RealType x;
 494 | - | RealType y;
 524 |   | RealType a;
 496 | - | int old_atoms;
 497 | - | int add_atoms;
 498 | - | int new_atoms;
 499 | - | int nTarget;
 500 | - | int done;
 501 | - | int i;
 502 | - | int j;
 503 | - | int loops;
 504 | - | int which_proc;
 525 |   | int nProcessors;
 526 |   | std::vector<int> atomsPerProc;
 527 |   | int nGlobalMols = info->getNGlobalMolecules();
 528 | < | std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition:
 528 | > | std::vector<int> molToProcMap(nGlobalMols, -1); // default to an
 529 | > | // error
 530 | > | // condition:
 531 |   |
 532 | < | MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
 532 | > | MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);
 533 | > | //nProcessors = MPI::COMM_WORLD.Get_size();
 534 |   |
 535 |   | if (nProcessors > nGlobalMols) {
 536 |   | sprintf(painCave.errMsg,
 539 |   | "\tthe number of molecules. This will not result in a \n"
 540 |   | "\tusable division of atoms for force decomposition.\n"
 541 |   | "\tEither try a smaller number of processors, or run the\n"
 542 | < | "\tsingle-processor version of OpenMD.\n", nProcessors, nGlobalMols);
 542 | > | "\tsingle-processor version of OpenMD.\n", nProcessors,
 543 | > | nGlobalMols);
 544 |   |
 545 |   | painCave.isFatal = 1;
 546 |   | simError();
 547 |   | }
 548 |   |
 525 | - | int seedValue;
 549 |   | Globals * simParams = info->getSimParams();
 550 | < | SeqRandNumGen* myRandom; //divide labor does not need Parallel random number generator
 550 | > | SeqRandNumGen* myRandom; //divide labor does not need Parallel
 551 | > | //random number generator
 552 |   | if (simParams->haveSeed()) {
 553 | < | seedValue = simParams->getSeed();
 553 | > | int seedValue = simParams->getSeed();
 554 |   | myRandom = new SeqRandNumGen(seedValue);
 555 |   | }else {
 556 |   | myRandom = new SeqRandNumGen();
 563 |   | atomsPerProc.insert(atomsPerProc.end(), nProcessors, 0);
 564 |   |
 565 |   | if (worldRank == 0) {
 566 | < | numerator = info->getNGlobalAtoms();
 567 | < | denominator = nProcessors;
 568 | < | precast = numerator / denominator;
 569 | < | nTarget = (int)(precast + 0.5);
 566 | > | RealType numerator = info->getNGlobalAtoms();
 567 | > | RealType denominator = nProcessors;
 568 | > | RealType precast = numerator / denominator;
 569 | > | int nTarget = (int)(precast + 0.5);
 570 |   |
 571 | < | for(i = 0; i < nGlobalMols; i++) {
 572 | < | done = 0;
 573 | < | loops = 0;
 571 | > | for(int i = 0; i < nGlobalMols; i++) {
 572 | > |
 573 | > | int done = 0;
 574 | > | int loops = 0;
 575 |   |
 576 |   | while (!done) {
 577 |   | loops++;
 578 |   |
 579 |   | // Pick a processor at random
 580 |   |
 581 | < | which_proc = (int) (myRandom->rand() * nProcessors);
 581 | > | int which_proc = (int) (myRandom->rand() * nProcessors);
 582 |   |
 583 |   | //get the molecule stamp first
 584 |   | int stampId = info->getMoleculeStampId(i);
 585 |   | MoleculeStamp * moleculeStamp = info->getMoleculeStamp(stampId);
 586 |   |
 587 |   | // How many atoms does this processor have so far?
 588 | < | old_atoms = atomsPerProc[which_proc];
 589 | < | add_atoms = moleculeStamp->getNAtoms();
 590 | < | new_atoms = old_atoms + add_atoms;
 588 | > | int old_atoms = atomsPerProc[which_proc];
 589 | > | int add_atoms = moleculeStamp->getNAtoms();
 590 | > | int new_atoms = old_atoms + add_atoms;
 591 |   |
 592 |   | // If we've been through this loop too many times, we need
 593 |   | // to just give up and assign the molecule to this processor
 594 |   | // and be done with it.
 595 |   |
 596 |   | if (loops > 100) {
 597 | + |
 598 |   | sprintf(painCave.errMsg,
 599 | < | "I've tried 100 times to assign molecule %d to a "
 600 | < | " processor, but can't find a good spot.\n"
 601 | < | "I'm assigning it at random to processor %d.\n",
 599 | > | "There have been 100 attempts to assign molecule %d to an\n"
 600 | > | "\tunderworked processor, but there's no good place to\n"
 601 | > | "\tleave it. OpenMD is assigning it at random to processor %d.\n",
 602 |   | i, which_proc);
 603 | < |
 603 | > |
 604 |   | painCave.isFatal = 0;
 605 | + | painCave.severity = OPENMD_INFO;
 606 |   | simError();
 607 |   |
 608 |   | molToProcMap[i] = which_proc;
 631 |   | // Pacc(x) = exp(- a * x)
 632 |   | // where a = penalty / (average atoms per molecule)
 633 |   |
 634 | < | x = (RealType)(new_atoms - nTarget);
 635 | < | y = myRandom->rand();
 634 | > | RealType x = (RealType)(new_atoms - nTarget);
 635 | > | RealType y = myRandom->rand();
 636 |   |
 637 |   | if (y < exp(- a * x)) {
 638 |   | molToProcMap[i] = which_proc;
 647 |   | }
 648 |   |
 649 |   | delete myRandom;
 650 | < |
 650 | > |
 651 |   | // Spray out this nonsense to all other processors:
 625 | - |
 652 |   | MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
 653 | + | // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
 654 |   | } else {
 655 |   |
 656 |   | // Listen to your marching orders from processor 0:
 630 | - |
 657 |   | MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
 658 | + | // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
 659 | + |
 660 |   | }
 661 |   |
 662 |   | info->setMolToProcMap(molToProcMap);
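The acceptance rule in this routine is the one stated in the comments above: a molecule is offered to a randomly chosen processor, and the move is accepted with probability Pacc(x) = exp(-a*x), where x is how many atoms past the per-processor target that processor would end up with (when x <= 0 the exponential is >= 1, so underloaded processors are always accepted). A single-process sketch of the same load-balancing loop, using plain rand() in place of OpenMD's SeqRandNumGen and omitting the painCave reporting; divideMolecules here is an illustrative stand-alone function, not the class method:

    #include <cmath>
    #include <cstdlib>
    #include <vector>

    // Assign molecules (each with a known atom count) to nProcs processors,
    // penalizing processors that would drift above the average load.
    std::vector<int> divideMolecules(const std::vector<int>& atomsPerMol,
                                     int nProcs, double a) {
      int nAtoms = 0;
      for (size_t i = 0; i < atomsPerMol.size(); ++i) nAtoms += atomsPerMol[i];
      int nTarget = static_cast<int>(static_cast<double>(nAtoms) / nProcs + 0.5);

      std::vector<int> molToProc(atomsPerMol.size(), -1);
      std::vector<int> atomsPerProc(nProcs, 0);

      for (size_t i = 0; i < atomsPerMol.size(); ++i) {
        int loops = 0;
        while (true) {
          ++loops;
          int which_proc = std::rand() % nProcs;        // pick a processor at random
          int new_atoms  = atomsPerProc[which_proc] + atomsPerMol[i];
          double x = static_cast<double>(new_atoms - nTarget);
          double y = std::rand() / (RAND_MAX + 1.0);

          // Accept with probability exp(-a*x); after 100 failed tries just
          // take whatever processor was drawn last, as the routine above does.
          if (y < std::exp(-a * x) || loops > 100) {
            molToProc[i] = which_proc;
            atomsPerProc[which_proc] = new_atoms;
            break;
          }
        }
      }
      return molToProc;
    }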
 703 |   | set<AtomType*>::iterator i;
 704 |   | bool hasDirectionalAtoms = false;
 705 |   | bool hasFixedCharge = false;
 706 | < | bool hasMultipoles = false;
 706 | > | bool hasDipoles = false;
 707 | > | bool hasQuadrupoles = false;
 708 |   | bool hasPolarizable = false;
 709 |   | bool hasFluctuatingCharge = false;
 710 |   | bool hasMetallic = false;
 726 |   | if (da.isDirectional()){
 727 |   | hasDirectionalAtoms = true;
 728 |   | }
 729 | < | if (ma.isMultipole()){
 730 | < | hasMultipoles = true;
 729 | > | if (ma.isDipole()){
 730 | > | hasDipoles = true;
 731 |   | }
 732 | + | if (ma.isQuadrupole()){
 733 | + | hasQuadrupoles = true;
 734 | + | }
 735 |   | if (ea.isEAM() || sca.isSuttonChen()){
 736 |   | hasMetallic = true;
 737 |   | }
 755 |   | storageLayout |= DataStorage::dslTorque;
 756 |   | }
 757 |   | }
 758 | < | if (hasMultipoles) {
 759 | < | storageLayout |= DataStorage::dslElectroFrame;
 758 | > | if (hasDipoles) {
 759 | > | storageLayout |= DataStorage::dslDipole;
 760 |   | }
 761 | + | if (hasQuadrupoles) {
 762 | + | storageLayout |= DataStorage::dslQuadrupole;
 763 | + | }
 764 |   | if (hasFixedCharge || hasFluctuatingCharge) {
 765 |   | storageLayout |= DataStorage::dslSkippedCharge;
 766 |   | }
 795 |   | }
 796 |   | }
 797 |   |
 798 | < | if (simParams->getOutputElectricField()) {
 798 | > | if (simParams->getOutputElectricField() | simParams->haveElectricField()) {
 799 |   | storageLayout |= DataStorage::dslElectricField;
 800 |   | }
 801 | + |
 802 |   | if (simParams->getOutputFluctuatingCharges()) {
 803 |   | storageLayout |= DataStorage::dslFlucQPosition;
 804 |   | storageLayout |= DataStorage::dslFlucQVelocity;
 805 |   | storageLayout |= DataStorage::dslFlucQForce;
 806 |   | }
 807 |   |
 808 | + | info->setStorageLayout(storageLayout);
 809 | + |
 810 |   | return storageLayout;
 811 |   | }
 812 |   |
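These hunks split the old hasMultipoles / dslElectroFrame handling into separate dipole and quadrupole flags and also store the layout back into the SimInfo object before returning it. The layout itself is a bitmask OR-ed together from what the force field and the output options require; note that the new electric-field test combines two bool results with a bitwise |, which behaves like || here apart from short-circuiting. A trimmed-down sketch of the same pattern, with made-up flag values standing in for the real DataStorage::dsl* constants:

    // Illustrative bit flags; the real constants are defined in DataStorage.
    enum StorageBits {
      dslTorque        = 1 << 0,
      dslDipole        = 1 << 1,
      dslQuadrupole    = 1 << 2,
      dslSkippedCharge = 1 << 3,
      dslElectricField = 1 << 4
    };

    int computeStorageLayout(bool hasDirectionalAtoms, bool hasDipoles,
                             bool hasQuadrupoles, bool hasAnyCharge,
                             bool wantsElectricField) {
      int storageLayout = 0;
      if (hasDirectionalAtoms) storageLayout |= dslTorque;
      if (hasDipoles)          storageLayout |= dslDipole;
      if (hasQuadrupoles)      storageLayout |= dslQuadrupole;
      if (hasAnyCharge)        storageLayout |= dslSkippedCharge;
      if (wantsElectricField)  storageLayout |= dslElectricField;
      return storageLayout;
    }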
 815 |   | Molecule::AtomIterator ai;
 816 |   | Molecule::RigidBodyIterator ri;
 817 |   | Molecule::CutoffGroupIterator ci;
 818 | + | Molecule::BondIterator boi;
 819 | + | Molecule::BendIterator bei;
 820 | + | Molecule::TorsionIterator ti;
 821 | + | Molecule::InversionIterator ii;
 822 |   | Molecule::IntegrableObjectIterator ioi;
 823 | < | Molecule * mol;
 824 | < | Atom * atom;
 825 | < | RigidBody * rb;
 826 | < | CutoffGroup * cg;
 823 | > | Molecule* mol;
 824 | > | Atom* atom;
 825 | > | RigidBody* rb;
 826 | > | CutoffGroup* cg;
 827 | > | Bond* bond;
 828 | > | Bend* bend;
 829 | > | Torsion* torsion;
 830 | > | Inversion* inversion;
 831 |   | int beginAtomIndex;
 832 |   | int beginRigidBodyIndex;
 833 |   | int beginCutoffGroupIndex;
 834 | + | int beginBondIndex;
 835 | + | int beginBendIndex;
 836 | + | int beginTorsionIndex;
 837 | + | int beginInversionIndex;
 838 |   | int nGlobalAtoms = info->getNGlobalAtoms();
 839 | + | int nGlobalRigidBodies = info->getNGlobalRigidBodies();
 840 |   |
 841 |   | beginAtomIndex = 0;
 842 | < | //rigidbody's index begins right after atom's
 842 | > | // The rigid body indices begin immediately after the atom indices:
 843 |   | beginRigidBodyIndex = info->getNGlobalAtoms();
 844 |   | beginCutoffGroupIndex = 0;
 845 | < |
 845 | > | beginBondIndex = 0;
 846 | > | beginBendIndex = 0;
 847 | > | beginTorsionIndex = 0;
 848 | > | beginInversionIndex = 0;
 849 | > |
 850 |   | for(int i = 0; i < info->getNGlobalMolecules(); i++) {
 851 |   |
 852 |   | #ifdef IS_MPI
 855 |   | // stuff to do if I own this molecule
 856 |   | mol = info->getMoleculeByGlobalIndex(i);
 857 |   |
 858 | < | //local index(index in DataStorge) of atom is important
 859 | < | for(atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) {
 858 | > | // The local index(index in DataStorge) of the atom is important:
 859 | > | for(atom = mol->beginAtom(ai); atom != NULL;
 860 | > | atom = mol->nextAtom(ai)) {
 861 |   | atom->setGlobalIndex(beginAtomIndex++);
 862 |   | }
 863 |   |
 866 |   | rb->setGlobalIndex(beginRigidBodyIndex++);
 867 |   | }
 868 |   |
 869 | < | //local index of cutoff group is trivial, it only depends on
 870 | < | //the order of travesing
 869 | > | // The local index of other objects only depends on the order
 870 | > | // of traversal:
 871 |   | for(cg = mol->beginCutoffGroup(ci); cg != NULL;
 872 |   | cg = mol->nextCutoffGroup(ci)) {
 873 |   | cg->setGlobalIndex(beginCutoffGroupIndex++);
 874 | + | }
 875 | + | for(bond = mol->beginBond(boi); bond != NULL;
 876 | + | bond = mol->nextBond(boi)) {
 877 | + | bond->setGlobalIndex(beginBondIndex++);
 878 |   | }
 879 | + | for(bend = mol->beginBend(bei); bend != NULL;
 880 | + | bend = mol->nextBend(bei)) {
 881 | + | bend->setGlobalIndex(beginBendIndex++);
 882 | + | }
 883 | + | for(torsion = mol->beginTorsion(ti); torsion != NULL;
 884 | + | torsion = mol->nextTorsion(ti)) {
 885 | + | torsion->setGlobalIndex(beginTorsionIndex++);
 886 | + | }
 887 | + | for(inversion = mol->beginInversion(ii); inversion != NULL;
 888 | + | inversion = mol->nextInversion(ii)) {
 889 | + | inversion->setGlobalIndex(beginInversionIndex++);
 890 | + | }
 891 |   |
 892 |   | #ifdef IS_MPI
 893 |   | } else {
 900 |   | beginAtomIndex += stamp->getNAtoms();
 901 |   | beginRigidBodyIndex += stamp->getNRigidBodies();
 902 |   | beginCutoffGroupIndex += stamp->getNCutoffGroups() + stamp->getNFreeAtoms();
 903 | + | beginBondIndex += stamp->getNBonds();
 904 | + | beginBendIndex += stamp->getNBends();
 905 | + | beginTorsionIndex += stamp->getNTorsions();
 906 | + | beginInversionIndex += stamp->getNInversions();
 907 |   | }
 908 |   | #endif
 909 |   |
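Bonds, bends, torsions, and inversions now receive global indices alongside atoms, rigid bodies, and cutoff groups. The important invariant is that every rank walks the molecules in the same order: the owner hands out indices to its real objects, while every other rank simply advances the counters by the counts stored in the molecule's stamp, so the numbering stays identical everywhere. A simplified sketch of that bookkeeping with just atoms and bonds; the StampCounts struct and ownsMolecule flag are illustrative stand-ins for MoleculeStamp and the MPI ownership test:

    #include <vector>

    struct StampCounts { int nAtoms; int nBonds; };   // trimmed-down "MoleculeStamp"

    // Every rank walks all molecules in the same order; only the owner records
    // indices for its own objects, but all ranks advance the counters identically.
    void assignGlobalIndices(const std::vector<StampCounts>& stamps,
                             const std::vector<bool>& ownsMolecule,
                             std::vector<int>& myAtomIndices,
                             std::vector<int>& myBondIndices) {
      int beginAtomIndex = 0;
      int beginBondIndex = 0;
      for (size_t i = 0; i < stamps.size(); ++i) {
        if (ownsMolecule[i]) {
          for (int a = 0; a < stamps[i].nAtoms; ++a)
            myAtomIndices.push_back(beginAtomIndex++);
          for (int b = 0; b < stamps[i].nBonds; ++b)
            myBondIndices.push_back(beginBondIndex++);
        } else {
          // Not ours: skip ahead by the same counts so the numbering stays global.
          beginAtomIndex += stamps[i].nAtoms;
          beginBondIndex += stamps[i].nBonds;
        }
      }
    }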
 911 |   |
 912 |   | //fill globalGroupMembership
 913 |   | std::vector<int> globalGroupMembership(info->getNGlobalAtoms(), 0);
 914 | < | for(mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
 915 | < | for (cg = mol->beginCutoffGroup(ci); cg != NULL; cg = mol->nextCutoffGroup(ci)) {
 916 | < |
 914 | > | for(mol = info->beginMolecule(mi); mol != NULL;
 915 | > | mol = info->nextMolecule(mi)) {
 916 | > | for (cg = mol->beginCutoffGroup(ci); cg != NULL;
 917 | > | cg = mol->nextCutoffGroup(ci)) {
 918 |   | for(atom = cg->beginAtom(ai); atom != NULL; atom = cg->nextAtom(ai)) {
 919 |   | globalGroupMembership[atom->getGlobalIndex()] = cg->getGlobalIndex();
 920 |   | }
 929 |   | // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
 930 |   | // docs said we could.
 931 |   | std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
 932 | < | MPI_Allreduce(&globalGroupMembership[0], &tmpGroupMembership[0], nGlobalAtoms,
 932 | > | MPI_Allreduce(&globalGroupMembership[0],
 933 | > | &tmpGroupMembership[0], nGlobalAtoms,
 934 |   | MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 935 | + | // MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
 936 | + | // &tmpGroupMembership[0], nGlobalAtoms,
 937 | + | // MPI::INT, MPI::SUM);
 938 |   | info->setGlobalGroupMembership(tmpGroupMembership);
 939 |   | #else
 940 |   | info->setGlobalGroupMembership(globalGroupMembership);
 941 |   | #endif
 942 |   |
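The membership tables are assembled the same way on every rank: each processor fills in only the entries for the atoms it owns and leaves the rest at zero, so an element-wise MPI_SUM Allreduce reconstructs the complete table everywhere (the comment above notes that MPI_IN_PLACE would avoid the temporary vector). A minimal sketch of that merge; mergeMembership is an illustrative helper name:

    #include <mpi.h>
    #include <vector>

    // Each rank knows only some entries (the rest are zero); after the
    // element-wise sum every rank holds the full table.
    std::vector<int> mergeMembership(std::vector<int>& localPart) {
      std::vector<int> full(localPart.size(), 0);
      MPI_Allreduce(&localPart[0], &full[0],
                    static_cast<int>(localPart.size()),
                    MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      return full;
    }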
 943 |   | //fill molMembership
 944 | < | std::vector<int> globalMolMembership(info->getNGlobalAtoms(), 0);
 944 | > | std::vector<int> globalMolMembership(info->getNGlobalAtoms() +
 945 | > | info->getNGlobalRigidBodies(), 0);
 946 |   |
 947 | < | for(mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
 947 | > | for(mol = info->beginMolecule(mi); mol != NULL;
 948 | > | mol = info->nextMolecule(mi)) {
 949 |   | for(atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) {
 950 |   | globalMolMembership[atom->getGlobalIndex()] = mol->getGlobalIndex();
 951 |   | }
 952 | + | for (rb = mol->beginRigidBody(ri); rb != NULL;
 953 | + | rb = mol->nextRigidBody(ri)) {
 954 | + | globalMolMembership[rb->getGlobalIndex()] = mol->getGlobalIndex();
 955 | + | }
 956 |   | }
 957 |   |
 958 |   | #ifdef IS_MPI
 959 | < | std::vector<int> tmpMolMembership(info->getNGlobalAtoms(), 0);
 960 | < |
 961 | < | MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], nGlobalAtoms,
 959 | > | std::vector<int> tmpMolMembership(info->getNGlobalAtoms() +
 960 | > | info->getNGlobalRigidBodies(), 0);
 961 | > | MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
 962 | > | nGlobalAtoms + nGlobalRigidBodies,
 963 |   | MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 964 | + | // MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
 965 | + | // nGlobalAtoms + nGlobalRigidBodies,
 966 | + | // MPI::INT, MPI::SUM);
 967 |   |
 968 |   | info->setGlobalMolMembership(tmpMolMembership);
 969 |   | #else
 974 |   | // here the molecules are listed by their global indices.
 975 |   |
 976 |   | std::vector<int> nIOPerMol(info->getNGlobalMolecules(), 0);
 977 | < | for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
 977 | > | for (mol = info->beginMolecule(mi); mol != NULL;
 978 | > | mol = info->nextMolecule(mi)) {
 979 |   | nIOPerMol[mol->getGlobalIndex()] = mol->getNIntegrableObjects();
 980 |   | }
 981 |   |
 982 |   | #ifdef IS_MPI
 983 |   | std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
 984 |   | MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
 985 | < | info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 985 | > | info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 986 | > | // MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
 987 | > | // info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
 988 |   | #else
 989 |   | std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
 990 |   | #endif
 998 |   | }
 999 |   |
1000 |   | std::vector<StuntDouble*> IOIndexToIntegrableObject(info->getNGlobalIntegrableObjects(), (StuntDouble*)NULL);
1001 | < | for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
1001 | > | for (mol = info->beginMolecule(mi); mol != NULL;
1002 | > | mol = info->nextMolecule(mi)) {
1003 |   | int myGlobalIndex = mol->getGlobalIndex();
1004 |   | int globalIO = startingIOIndexForMol[myGlobalIndex];
1005 |   | for (StuntDouble* sd = mol->beginIntegrableObject(ioi); sd != NULL;
1015 |   | }
1016 |   |
1017 |   | void SimCreator::loadCoordinates(SimInfo* info, const std::string& mdFileName) {
 923 | - | Globals* simParams;
 924 | - |
 925 | - | simParams = info->getSimParams();
1018 |   |
1019 |   | DumpReader reader(info, mdFileName);
1020 |   | int nframes = reader.getNFrames();
1021 | < |
1021 | > |
1022 |   | if (nframes > 0) {
1023 |   | reader.readFrame(nframes - 1);
1024 |   | } else {