root/OpenMD/trunk/src/brains/SimCreator.cpp

Comparing trunk/src/brains/SimCreator.cpp (file contents):
Revision 1953 by gezelter, Thu Dec 5 18:19:26 2013 UTC vs.
Revision 1993 by gezelter, Tue Apr 29 17:32:31 2014 UTC

# Line 64 | Line 64
64   #include "brains/ForceField.hpp"
65   #include "utils/simError.h"
66   #include "utils/StringUtils.hpp"
67 + #include "utils/Revision.hpp"
68   #include "math/SeqRandNumGen.hpp"
69   #include "mdParser/MDLexer.hpp"
70   #include "mdParser/MDParser.hpp"
# Line 103 | Line 104 | namespace OpenMD {
104        const int masterNode = 0;
105  
106        if (worldRank == masterNode) {
107 <        MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
107 >        MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
108   #endif                
109          SimplePreprocessor preprocessor;
110 <        preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream);
110 >        preprocessor.preprocess(rawMetaDataStream, filename,
111 >                                startOfMetaDataBlock, ppStream);
112                  
113   #ifdef IS_MPI            
114 <        //brocasting the stream size
114 >        //broadcasting the stream size
115          streamSize = ppStream.str().size() +1;
116 <        MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
117 <        MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI::CHAR, masterNode);
118 <                          
116 >        MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
117 >        MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
118 >                  streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
119        } else {
118        MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
120  
121 <        //get stream size
121 <        MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
121 >        MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
122  
123 +        //get stream size
124 +        MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
125          char* buf = new char[streamSize];
126          assert(buf);
127                  
128          //receive file content
129 <        MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
130 <                
129 >        MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
130 >
131          ppStream.str(buf);
132          delete [] buf;
133        }
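
This hunk, like the later ones, replaces the deprecated MPI C++ bindings (MPI::COMM_WORLD.Bcast, MPI::INT, ...) with the plain C API (MPI_Bcast, MPI_INT, ...); the C++ bindings were deprecated in MPI-2.2 and removed in MPI-3.0. The pattern used here to ship the preprocessed metadata to every rank is: broadcast the stream size first, then the characters. A minimal, self-contained sketch of that pattern (the function name and variables below are illustrative, not OpenMD's actual code):

    #include <mpi.h>
    #include <algorithm>
    #include <string>
    #include <vector>

    // Broadcast a string from `root` to every rank: first the length
    // (including the terminating NUL), then the characters themselves.
    std::string broadcastString(const std::string& src, int root) {
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      int size = static_cast<int>(src.size()) + 1;        // count the NUL
      MPI_Bcast(&size, 1, MPI_INT, root, MPI_COMM_WORLD); // overwritten on non-root ranks

      std::vector<char> buf(size);
      if (rank == root)
        std::copy(src.c_str(), src.c_str() + size, buf.begin());
      MPI_Bcast(buf.data(), size, MPI_CHAR, root, MPI_COMM_WORLD);

      return std::string(buf.data());
    }

Note that the count argument for streamSize also changes from MPI::LONG to MPI_INT in this hunk, presumably to match the variable's declared type.
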
# Line 149 | Line 151 | namespace OpenMD {
151        parser.initializeASTFactory(factory);
152        parser.setASTFactory(&factory);
153        parser.mdfile();
152
154        // Create a tree parser that reads information into Globals
155        MDTreeParser treeParser;
156        treeParser.initializeASTFactory(factory);
# Line 263 | Line 264 | namespace OpenMD {
264      version.append(".");
265      version.append(OPENMD_VERSION_MINOR);
266  
267 <    std::string svnrev;
267 >    std::string svnrev(g_REVISION, strnlen(g_REVISION, 20));
268      //convert a macro from compiler to a string in c++
269 <    STR_DEFINE(svnrev, SVN_REV );
269 >    // STR_DEFINE(svnrev, SVN_REV );
270      version.append(" Revision: ");
271      // If there's no SVN revision, just call this the RELEASE revision.
272      if (!svnrev.empty()) {
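
The old STR_DEFINE(svnrev, SVN_REV) macro stringified a compile-time definition; the new code instead reads a g_REVISION symbol from the freshly added utils/Revision.hpp, bounding the copy with strnlen(g_REVISION, 20) so an unterminated or overlong revision string cannot overrun. The generated header itself is not part of this diff; a plausible shape for it, purely as an assumption consistent with how g_REVISION is used above, would be:

    // Hypothetical utils/Revision.hpp (illustrative only; the real file is
    // produced by the build system from the repository metadata).
    #ifndef UTILS_REVISION_HPP
    #define UTILS_REVISION_HPP

    extern const char g_REVISION[];   // e.g. "1993", filled in at build time

    #endif
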
# Line 520 | Line 521 | namespace OpenMD {
521                                                      // error
522                                                      // condition:
523      
524 <    nProcessors = MPI::COMM_WORLD.Get_size();
524 >    MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);    
525      
526      if (nProcessors > nGlobalMols) {
527        sprintf(painCave.errMsg,
# Line 639 | Line 640 | namespace OpenMD {
640        delete myRandom;
641  
642        // Spray out this nonsense to all other processors:
643 <      MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
643 >      MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
644 >
645      } else {
646        
647        // Listen to your marching orders from processor 0:
648 <      MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
648 >      MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
649  
650      }
651      
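
Both branches above now issue the same MPI_Bcast call; in the C API the root passes molToProcMap as the send buffer and every other rank passes it as the receive buffer, so the call could in principle be hoisted out of the if/else entirely. A sketch of that equivalent form (not what the committed code does, which keeps the two commented branches):

    // Equivalent single call: rank 0 sends, all other ranks receive.
    MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
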
# Line 787 | Line 789 | namespace OpenMD {
789        storageLayout |= DataStorage::dslElectricField;
790      }
791  
792 +    if (simParams->getOutputSitePotential() ) {
793 +      storageLayout |= DataStorage::dslSitePotential;
794 +    }
795 +
796      if (simParams->getOutputFluctuatingCharges()) {
797        storageLayout |= DataStorage::dslFlucQPosition;
798        storageLayout |= DataStorage::dslFlucQVelocity;
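
The new getOutputSitePotential() block follows the existing pattern of OR-ing DataStorage::dsl* flags into storageLayout; downstream code can then test for a requested field with a bitwise AND. A minimal sketch, assuming (as the |= accumulation implies) that the dsl* enumerators are distinct bit masks:

    // Later consumers can check whether site potentials were requested:
    if (storageLayout & DataStorage::dslSitePotential) {
      // allocate / write site potential storage
    }
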
# Line 917 | Line 923 | namespace OpenMD {
923      // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
924      // docs said we could.
925      std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
926 <    MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
927 <                              &tmpGroupMembership[0], nGlobalAtoms,
928 <                              MPI::INT, MPI::SUM);
926 >    MPI_Allreduce(&globalGroupMembership[0],
927 >                  &tmpGroupMembership[0], nGlobalAtoms,
928 >                  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
929 >
930      info->setGlobalGroupMembership(tmpGroupMembership);
931   #else
932      info->setGlobalGroupMembership(globalGroupMembership);
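
The comment at the top of this hunk notes that MPI_IN_PLACE would be prettier than the temporary tmpGroupMembership vector. With the C bindings that form is available; a sketch of what it would look like (not what the committed code does):

    // MPI_IN_PLACE form: every rank passes MPI_IN_PLACE as the send buffer
    // and the shared vector as the receive buffer, avoiding the extra copy.
    MPI_Allreduce(MPI_IN_PLACE, &globalGroupMembership[0], nGlobalAtoms,
                  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    info->setGlobalGroupMembership(globalGroupMembership);
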
# Line 943 | Line 950 | namespace OpenMD {
950   #ifdef IS_MPI
951      std::vector<int> tmpMolMembership(info->getNGlobalAtoms() +
952                                        info->getNGlobalRigidBodies(), 0);
953 <    MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
954 <                              nGlobalAtoms + nGlobalRigidBodies,
955 <                              MPI::INT, MPI::SUM);
953 >    MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
954 >                  nGlobalAtoms + nGlobalRigidBodies,
955 >                  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
956      
957      info->setGlobalMolMembership(tmpMolMembership);
958   #else
# Line 963 | Line 970 | namespace OpenMD {
970      
971   #ifdef IS_MPI
972      std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
973 <    MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
974 <                              info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
973 >    MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
974 >      info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
975   #else
976      std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
977   #endif    

Diff Legend

(no marker)  Removed lines (present only in the old revision; shown with the old line number)
+  Added lines
<  Changed lines (old revision)
>  Changed lines (new revision)