 46     * @date 11/03/2004
 47     * @version 1.0
 48     */
 49 +
 50 +   #ifdef IS_MPI
 51 +   #include "mpi.h"
 52 +   #include "math/ParallelRandNumGen.hpp"
 53 +   #endif
 54 +
 55     #include <exception>
 56     #include <iostream>
 57     #include <sstream>
 64     #include "brains/ForceField.hpp"
 65     #include "utils/simError.h"
 66     #include "utils/StringUtils.hpp"
 67 +   #include "utils/Revision.hpp"
 68     #include "math/SeqRandNumGen.hpp"
 69     #include "mdParser/MDLexer.hpp"
 70     #include "mdParser/MDParser.hpp"
 90     #include "types/FixedChargeAdapter.hpp"
 91     #include "types/FluctuatingChargeAdapter.hpp"
 92
 86 -   #ifdef IS_MPI
 87 -   #include "mpi.h"
 88 -   #include "math/ParallelRandNumGen.hpp"
 89 -   #endif
 93
 94     namespace OpenMD {
 95
104     const int masterNode = 0;
105
106     if (worldRank == masterNode) {
107 <   MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
107 >   MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
108     #endif
109     SimplePreprocessor preprocessor;
110 <   preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream);
110 >   preprocessor.preprocess(rawMetaDataStream, filename,
111 >   startOfMetaDataBlock, ppStream);
112
113     #ifdef IS_MPI
114 <   //brocasting the stream size
115 <   streamSize = ppStream.str().size() +1;
116 <   MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
117 <   MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI::CHAR, masterNode);
118 <
114 >   //broadcasting the stream size
115 >   streamSize = ppStream.str().size() +1;
116 >   MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
117 >   MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
118 >   streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
119     } else {
116 -   MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
120
121 <   //get stream size
119 <   MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
121 >   MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
122
123 +   //get stream size
124 +   MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
125     char* buf = new char[streamSize];
126     assert(buf);
127
128     //receive file content
129 <   MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
130 <
129 >   MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
130 >
131     ppStream.str(buf);
132     delete [] buf;
133     }
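These hunks replace the deprecated MPI-2 C++ bindings (MPI::COMM_WORLD.Bcast with MPI::INT, MPI::CHAR, and MPI::LONG constants) with the plain C API, moving the communicator into the final argument. A self-contained sketch of the same broadcast pattern, written against the C API the changeset migrates to; broadcastString and its arguments are illustrative, not OpenMD code:

    #include <mpi.h>
    #include <string>
    #include <vector>

    // Broadcast a string from masterNode to all ranks: size first, bytes second.
    void broadcastString(std::string& data, int masterNode) {
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      // Size includes the terminating '\0', matching the "+ 1" in the diff.
      int streamSize = static_cast<int>(data.size()) + 1;
      MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);

      if (rank == masterNode) {
        // The root only reads this buffer, so the const_cast is harmless.
        MPI_Bcast(const_cast<char*>(data.c_str()), streamSize, MPI_CHAR,
                  masterNode, MPI_COMM_WORLD);
      } else {
        std::vector<char> buf(streamSize);
        MPI_Bcast(buf.data(), streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
        data.assign(buf.data());
      }
    }

Note that the size broadcast also changes its datatype from MPI::LONG to MPI_INT, which assumes streamSize is declared as an int on every rank.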
151     parser.initializeASTFactory(factory);
152     parser.setASTFactory(&factory);
153     parser.mdfile();
150 -
154     // Create a tree parser that reads information into Globals
155     MDTreeParser treeParser;
156     treeParser.initializeASTFactory(factory);
228     catch (OpenMDException& e) {
229     sprintf(painCave.errMsg,
230     "%s\n",
231 <   e.getMessage().c_str());
231 >   e.what());
232     painCave.isFatal = 1;
233     simError();
234     }
253     std::string mdRawData;
254     int metaDataBlockStart = -1;
255     int metaDataBlockEnd = -1;
253 -   int i, j;
256     streamoff mdOffset;
257     int mdFileVersion;
258
263     version.append(".");
264     version.append(OPENMD_VERSION_MINOR);
265
266 <   std::string svnrev;
266 >   std::string svnrev(g_REVISION, strnlen(g_REVISION, 20));
267     //convert a macro from compiler to a string in c++
268 <   STR_DEFINE(svnrev, SVN_REV );
268 >   // STR_DEFINE(svnrev, SVN_REV );
269     version.append(" Revision: ");
270     // If there's no SVN revision, just call this the RELEASE revision.
271     if (!svnrev.empty()) {
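The revision string is now taken from a character array supplied by utils/Revision.hpp instead of an SVN keyword macro. A minimal sketch of the bounded-copy idiom at line 266, for illustration only; the declaration of g_REVISION is an assumption, and strnlen() is the POSIX function the changeset itself relies on:

    #include <cstring>
    #include <string>

    extern const char g_REVISION[];   // assumed to come from utils/Revision.hpp

    // Copy at most 20 characters, so an unterminated or overlong revision
    // buffer cannot be scanned past its intended length.
    std::string revisionString() {
      return std::string(g_REVISION, strnlen(g_REVISION, 20));
    }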
293     mdFile_.getline(buffer, bufferSize);
294     ++lineNo;
295     std::string line = trimLeftCopy(buffer);
296 <   i = CaseInsensitiveFind(line, "<OpenMD");
297 <   if (static_cast<size_t>(i) == string::npos) {
296 >   std::size_t i = CaseInsensitiveFind(line, "<OpenMD");
297 >   if (i == string::npos) {
298     // try the older file strings to see if that works:
299     i = CaseInsensitiveFind(line, "<OOPSE");
300     }
301
302 <   if (static_cast<size_t>(i) == string::npos) {
302 >   if (i == string::npos) {
303     // still no luck!
304     sprintf(painCave.errMsg,
305     "SimCreator: File: %s is not a valid OpenMD file!\n",
334
335     std::string line = trimLeftCopy(buffer);
336     if (metaDataBlockStart == -1) {
337 <   i = CaseInsensitiveFind(line, "<MetaData>");
337 >   std::size_t i = CaseInsensitiveFind(line, "<MetaData>");
338     if (i != string::npos) {
339     metaDataBlockStart = lineNo;
340     mdOffset = mdFile_.tellg();
341     }
342     } else {
343 <   i = CaseInsensitiveFind(line, "</MetaData>");
343 >   std::size_t i = CaseInsensitiveFind(line, "</MetaData>");
344     if (i != string::npos) {
345     metaDataBlockEnd = lineNo;
346     }
373     for (int i = 0; i < metaDataBlockEnd - metaDataBlockStart - 1; ++i) {
374     mdFile_.getline(buffer, bufferSize);
375     std::string line = trimLeftCopy(buffer);
376 <   j = CaseInsensitiveFind(line, "## Last run using OpenMD Version");
377 <   if (static_cast<size_t>(j) != string::npos) {
376 >   std::size_t j = CaseInsensitiveFind(line,
377 >   "## Last run using OpenMD Version");
378 >   if (j != string::npos) {
379     foundVersion = true;
380     mdRawData += version;
381     } else {
521     // error
522     // condition:
523
524 <   nProcessors = MPI::COMM_WORLD.Get_size();
524 >   MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);
525
526     if (nProcessors > nGlobalMols) {
527     sprintf(painCave.errMsg,
640     delete myRandom;
641
642     // Spray out this nonsense to all other processors:
643 <   MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
643 >   MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
644 >
645     } else {
646
647     // Listen to your marching orders from processor 0:
648 <   MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
648 >   MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
649
650     }
651
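Both branches of the molecule-distribution code above make the same MPI_Bcast call: rank 0 builds molToProcMap, and every rank, including rank 0, must enter the collective for the broadcast to complete. A compact sketch of that pattern; the function and its arguments are illustrative, not OpenMD code:

    #include <mpi.h>
    #include <vector>

    void distributeAssignments(std::vector<int>& molToProcMap) {
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      if (rank == 0) {
        // Rank 0 fills molToProcMap (round robin, random placement, ...).
      }

      // Identical collective call on every rank; rank 0 sends, others receive.
      MPI_Bcast(molToProcMap.data(), static_cast<int>(molToProcMap.size()),
                MPI_INT, 0, MPI_COMM_WORLD);
    }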
785     }
786     }
787
788 <   if (simParams->getOutputElectricField() | simParams->haveElectricField()) {
788 >   if (simParams->getOutputElectricField() |
789 >   simParams->haveElectricField() | simParams->haveUniformField() |
790 >   simParams->haveUniformGradientStrength() |
791 >   simParams->haveUniformGradientDirection1() |
792 >   simParams->haveUniformGradientDirection2() ) {
793     storageLayout |= DataStorage::dslElectricField;
794     }
795
796 +   if (simParams->getOutputSitePotential() ) {
797 +   storageLayout |= DataStorage::dslSitePotential;
798 +   }
799 +
800     if (simParams->getOutputFluctuatingCharges()) {
801     storageLayout |= DataStorage::dslFlucQPosition;
802     storageLayout |= DataStorage::dslFlucQVelocity;
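storageLayout is a bit mask, so each requested quantity simply ORs another flag into the layout; the new hunk adds dslSitePotential alongside the existing electric-field and fluctuating-charge flags. A toy version of the pattern follows; the enumerator names mirror DataStorage::dsl*, but the values are assumptions for illustration, not the real constants:

    enum StorageBits {
      dslElectricField = 1 << 0,
      dslSitePotential = 1 << 1,
      dslFlucQPosition = 1 << 2,
      dslFlucQVelocity = 1 << 3
    };

    int buildLayout(bool wantField, bool wantSitePotential) {
      int storageLayout = 0;
      if (wantField)          storageLayout |= dslElectricField;
      if (wantSitePotential)  storageLayout |= dslSitePotential;
      return storageLayout;
    }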
813     Molecule::AtomIterator ai;
814     Molecule::RigidBodyIterator ri;
815     Molecule::CutoffGroupIterator ci;
816 +   Molecule::BondIterator boi;
817 +   Molecule::BendIterator bei;
818 +   Molecule::TorsionIterator ti;
819 +   Molecule::InversionIterator ii;
820     Molecule::IntegrableObjectIterator ioi;
821 <   Molecule * mol;
822 <   Atom * atom;
823 <   RigidBody * rb;
824 <   CutoffGroup * cg;
821 >   Molecule* mol;
822 >   Atom* atom;
823 >   RigidBody* rb;
824 >   CutoffGroup* cg;
825 >   Bond* bond;
826 >   Bend* bend;
827 >   Torsion* torsion;
828 >   Inversion* inversion;
829     int beginAtomIndex;
830     int beginRigidBodyIndex;
831     int beginCutoffGroupIndex;
832 +   int beginBondIndex;
833 +   int beginBendIndex;
834 +   int beginTorsionIndex;
835 +   int beginInversionIndex;
836 +   #ifdef IS_MPI
837     int nGlobalAtoms = info->getNGlobalAtoms();
838     int nGlobalRigidBodies = info->getNGlobalRigidBodies();
839 +   #endif
840
841     beginAtomIndex = 0;
842 <   //rigidbody's index begins right after atom's
842 >   // The rigid body indices begin immediately after the atom indices:
843     beginRigidBodyIndex = info->getNGlobalAtoms();
844     beginCutoffGroupIndex = 0;
845 <
845 >   beginBondIndex = 0;
846 >   beginBendIndex = 0;
847 >   beginTorsionIndex = 0;
848 >   beginInversionIndex = 0;
849 >
850     for(int i = 0; i < info->getNGlobalMolecules(); i++) {
851
852     #ifdef IS_MPI
855     // stuff to do if I own this molecule
856     mol = info->getMoleculeByGlobalIndex(i);
857
858 <   //local index(index in DataStorge) of atom is important
859 <   for(atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) {
858 >   // The local index(index in DataStorge) of the atom is important:
859 >   for(atom = mol->beginAtom(ai); atom != NULL;
860 >   atom = mol->nextAtom(ai)) {
861     atom->setGlobalIndex(beginAtomIndex++);
862     }
863
866     rb->setGlobalIndex(beginRigidBodyIndex++);
867     }
868
869 <   //local index of cutoff group is trivial, it only depends on
870 <   //the order of travesing
869 >   // The local index of other objects only depends on the order
870 >   // of traversal:
871     for(cg = mol->beginCutoffGroup(ci); cg != NULL;
872     cg = mol->nextCutoffGroup(ci)) {
873     cg->setGlobalIndex(beginCutoffGroupIndex++);
874     }
875 +   for(bond = mol->beginBond(boi); bond != NULL;
876 +   bond = mol->nextBond(boi)) {
877 +   bond->setGlobalIndex(beginBondIndex++);
878 +   }
879 +   for(bend = mol->beginBend(bei); bend != NULL;
880 +   bend = mol->nextBend(bei)) {
881 +   bend->setGlobalIndex(beginBendIndex++);
882 +   }
883 +   for(torsion = mol->beginTorsion(ti); torsion != NULL;
884 +   torsion = mol->nextTorsion(ti)) {
885 +   torsion->setGlobalIndex(beginTorsionIndex++);
886 +   }
887 +   for(inversion = mol->beginInversion(ii); inversion != NULL;
888 +   inversion = mol->nextInversion(ii)) {
889 +   inversion->setGlobalIndex(beginInversionIndex++);
890 +   }
891
892     #ifdef IS_MPI
893     } else {
900     beginAtomIndex += stamp->getNAtoms();
901     beginRigidBodyIndex += stamp->getNRigidBodies();
902     beginCutoffGroupIndex += stamp->getNCutoffGroups() + stamp->getNFreeAtoms();
903 +   beginBondIndex += stamp->getNBonds();
904 +   beginBendIndex += stamp->getNBends();
905 +   beginTorsionIndex += stamp->getNTorsions();
906 +   beginInversionIndex += stamp->getNInversions();
907     }
908     #endif
909
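The loops above give every atom, rigid body, cutoff group, bond, bend, torsion, and inversion a globally unique index without any communication: each rank walks the molecules in the same global order, assigns indices for the molecules it owns, and for everything else advances the counters by the counts recorded in the molecule's stamp. A self-contained sketch of that counting scheme with illustrative names, not OpenMD code:

    #include <cstddef>
    #include <vector>

    // atomsPerMol[i]  : number of atoms in molecule i (known to every rank)
    // molToProcMap[i] : rank that owns molecule i
    // Returns the global indices of the atoms this rank owns.
    std::vector<int> assignGlobalAtomIndices(const std::vector<int>& atomsPerMol,
                                             const std::vector<int>& molToProcMap,
                                             int myRank) {
      std::vector<int> myIndices;
      int beginAtomIndex = 0;
      for (std::size_t i = 0; i < atomsPerMol.size(); ++i) {
        if (molToProcMap[i] == myRank) {
          for (int a = 0; a < atomsPerMol[i]; ++a)
            myIndices.push_back(beginAtomIndex++);
        } else {
          beginAtomIndex += atomsPerMol[i];   // skip, but keep counting
        }
      }
      return myIndices;   // the numbering comes out identical on all ranks
    }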
911
912     //fill globalGroupMembership
913     std::vector<int> globalGroupMembership(info->getNGlobalAtoms(), 0);
914 <   for(mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
915 <   for (cg = mol->beginCutoffGroup(ci); cg != NULL; cg = mol->nextCutoffGroup(ci)) {
916 <
914 >   for(mol = info->beginMolecule(mi); mol != NULL;
915 >   mol = info->nextMolecule(mi)) {
916 >   for (cg = mol->beginCutoffGroup(ci); cg != NULL;
917 >   cg = mol->nextCutoffGroup(ci)) {
918     for(atom = cg->beginAtom(ai); atom != NULL; atom = cg->nextAtom(ai)) {
919     globalGroupMembership[atom->getGlobalIndex()] = cg->getGlobalIndex();
920     }
929     // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
930     // docs said we could.
931     std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
932 <   MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
933 <   &tmpGroupMembership[0], nGlobalAtoms,
934 <   MPI::INT, MPI::SUM);
932 >   MPI_Allreduce(&globalGroupMembership[0],
933 >   &tmpGroupMembership[0], nGlobalAtoms,
934 >   MPI_INT, MPI_SUM, MPI_COMM_WORLD);
935 >
936     info->setGlobalGroupMembership(tmpGroupMembership);
937     #else
938     info->setGlobalGroupMembership(globalGroupMembership);
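Each rank fills globalGroupMembership only for the atoms it owns, leaving zeros elsewhere, so an element-wise MPI_SUM reduction assembles the complete table on every rank. The in-code comment alludes to MPI_IN_PLACE, which (with an MPI-2 or newer library) would let the reduction reuse the send buffer and drop the temporary vector. A sketch of both forms, illustrative rather than OpenMD code:

    #include <mpi.h>
    #include <vector>

    // Merge per-rank partial tables: each rank's vector is zero except in the
    // slots it owns, so an element-wise sum rebuilds the full table everywhere.
    std::vector<int> mergeMembership(std::vector<int> localPart) {
      std::vector<int> merged(localPart.size(), 0);
      MPI_Allreduce(localPart.data(), merged.data(),
                    static_cast<int>(localPart.size()),
                    MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      // In-place variant (avoids the temporary vector):
      //   MPI_Allreduce(MPI_IN_PLACE, localPart.data(),
      //                 static_cast<int>(localPart.size()),
      //                 MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      return merged;
    }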
956     #ifdef IS_MPI
957     std::vector<int> tmpMolMembership(info->getNGlobalAtoms() +
958     info->getNGlobalRigidBodies(), 0);
959 <   MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
960 <   nGlobalAtoms + nGlobalRigidBodies,
961 <   MPI::INT, MPI::SUM);
959 >   MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
960 >   nGlobalAtoms + nGlobalRigidBodies,
961 >   MPI_INT, MPI_SUM, MPI_COMM_WORLD);
962
963     info->setGlobalMolMembership(tmpMolMembership);
964     #else
976
977     #ifdef IS_MPI
978     std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
979 <   MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
980 <   info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
979 >   MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
980 >   info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
981     #else
982     std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
983     #endif