root/OpenMD/trunk/src/brains/SimCreator.cpp

Comparing trunk/src/brains/SimCreator.cpp (file contents):
Revision 1782 by gezelter, Wed Aug 22 02:28:28 2012 UTC vs.
Revision 1801 by gezelter, Mon Oct 1 18:21:15 2012 UTC

# Line 1 | Line 1
1   /*
2 < * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
2 > * copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
3   *
4   * The University of Notre Dame grants you ("Licensee") a
5   * non-exclusive, royalty free, license to use, modify and
# Line 100 | Line 100 | namespace OpenMD {
100   #ifdef IS_MPI            
101        int streamSize;
102        const int masterNode = 0;
103 <      int commStatus;
103 >
104        if (worldRank == masterNode) {
105 <        commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
105 >        MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
106   #endif                
107          SimplePreprocessor preprocessor;
108 <        preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream);
108 >        preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock,
109 >                                ppStream);
110                  
111   #ifdef IS_MPI            
112          // broadcasting the stream size
113          streamSize = ppStream.str().size() +1;
114 <        commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);                  
115 <
116 <        commStatus = MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
116 <            
114 >        MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
115 >        MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
116 >                              streamSize, MPI::CHAR, masterNode);
117                  
118        } else {
119  
120 <        commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
120 >        MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
121  
122          //get stream size
123 <        commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);  
123 >        MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
124  
125          char* buf = new char[streamSize];
126          assert(buf);
127                  
128          //receive file content
129 <        commStatus = MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
129 >        MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
130                  
131          ppStream.str(buf);
132          delete [] buf;
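
The pattern this hunk moves to the MPI C++ bindings, broadcasting the stream length first and then the raw characters, can be sketched on its own. The helper below is hypothetical (shareString is not part of OpenMD) and only illustrates the idiom.

    #include <mpi.h>
    #include <string>

    // Hypothetical helper: after the call, every rank holds a copy of the
    // string that rank 'masterNode' started with.  Mirrors the
    // size-then-contents broadcast used for ppStream above.
    void shareString(std::string& text, int masterNode = 0) {
      int rank = MPI::COMM_WORLD.Get_rank();

      // Broadcast the length first (including the terminating NUL, as above).
      // Note: the diff sends an int with MPI::LONG; MPI::INT matches the type.
      int streamSize = (rank == masterNode) ? int(text.size()) + 1 : 0;
      MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::INT, masterNode);

      // Broadcast the characters themselves.
      char* buf = new char[streamSize];
      if (rank == masterNode) {
        text.copy(buf, text.size());
        buf[text.size()] = '\0';
      }
      MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);

      if (rank != masterNode) text.assign(buf);
      delete [] buf;
    }
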
# Line 256 | Line 256 | namespace OpenMD {
256      int metaDataBlockStart = -1;
257      int metaDataBlockEnd = -1;
258      int i;
259 <    streamoff mdOffset;
259 >    streamoff mdOffset(0);
260      int mdFileVersion;
261  
262 +
263   #ifdef IS_MPI            
264      const int masterNode = 0;
265      if (worldRank == masterNode) {
266   #endif
267  
268 <      std::ifstream mdFile_(mdFileName.c_str());
268 >      std::ifstream mdFile_;
269 >      mdFile_.open(mdFileName.c_str(), ifstream::in | ifstream::binary);
270        
271        if (mdFile_.fail()) {
272          sprintf(painCave.errMsg,
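
The hunk above replaces the one-argument ifstream constructor with an explicit open() in ifstream::in | ifstream::binary mode; in binary mode tellg() and seekg() work with plain byte offsets, which is the usual motivation for this change when a stream position (here the streamoff mdOffset) is saved and reused. A minimal standalone sketch of the open-and-check idiom, with a placeholder file name and the painCave/simError reporting elided:

    #include <fstream>
    #include <iostream>

    int main() {
      // "sample.omd" is a placeholder; SimCreator receives mdFileName as a parameter.
      std::ifstream mdFile_;
      mdFile_.open("sample.omd", std::ifstream::in | std::ifstream::binary);

      if (mdFile_.fail()) {
        // SimCreator formats a message into painCave.errMsg and calls simError() here.
        std::cerr << "Cannot open metadata file\n";
        return 1;
      }

      // In binary mode tellg() reports a byte offset, so a position such as the
      // start of the metadata block can be stored and later restored with seekg().
      std::streamoff mdOffset = mdFile_.tellg();
      mdFile_.seekg(mdOffset);
      return 0;
    }
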
# Line 497 | Line 499 | namespace OpenMD {
499      int nTarget;
500      int done;
501      int i;
500    int j;
502      int loops;
503      int which_proc;
504      int nProcessors;
# Line 505 | Line 506 | namespace OpenMD {
506      int nGlobalMols = info->getNGlobalMolecules();
507      std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition:
508      
509 <    MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
509 >    nProcessors = MPI::COMM_WORLD.Get_size();
510      
511      if (nProcessors > nGlobalMols) {
512        sprintf(painCave.errMsg,
# Line 543 | Line 544 | namespace OpenMD {
544        nTarget = (int)(precast + 0.5);
545        
546        for(i = 0; i < nGlobalMols; i++) {
547 +
548          done = 0;
549          loops = 0;
550          
# Line 567 | Line 569 | namespace OpenMD {
569            // and be done with it.
570            
571            if (loops > 100) {
572 +
573              sprintf(painCave.errMsg,
574 <                    "I've tried 100 times to assign molecule %d to a "
575 <                    " processor, but can't find a good spot.\n"
576 <                    "I'm assigning it at random to processor %d.\n",
574 >                    "There have been 100 attempts to assign molecule %d to an\n"
575 >                    "\tunderworked processor, but there's no good place to\n"
576 >                    "\tleave it.  OpenMD is assigning it at random to processor %d.\n",
577                      i, which_proc);
578 <            
578 >          
579              painCave.isFatal = 0;
580 +            painCave.severity = OPENMD_INFO;
581              simError();
582              
583              molToProcMap[i] = which_proc;
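
The reworded message above uses OpenMD's painCave/simError reporting idiom: format the text into the global painCave struct, mark it non-fatal with an informational severity, then call simError(). A short sketch of that idiom, assuming the usual simError.h include:

    #include "utils/simError.h"
    #include <cstdio>

    // Sketch only: report a non-fatal, informational condition the way the
    // hunk above does for the random molecule assignment.
    void reportRandomAssignment(int mol, int which_proc) {
      sprintf(painCave.errMsg,
              "There have been 100 attempts to assign molecule %d to an\n"
              "\tunderworked processor, but there's no good place to\n"
              "\tleave it.  OpenMD is assigning it at random to processor %d.\n",
              mol, which_proc);
      painCave.isFatal  = 0;            // warn rather than abort
      painCave.severity = OPENMD_INFO;  // informational severity, as added above
      simError();
    }
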
# Line 618 | Line 622 | namespace OpenMD {
622        }
623        
624        delete myRandom;
625 <      
625 >
626        // Spray out this nonsense to all other processors:
627 <      
624 <      MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
627 >      MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
628      } else {
629        
630        // Listen to your marching orders from processor 0:
631 <      
632 <      MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
631 >      MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
632 >
633      }
634      
635      info->setMolToProcMap(molToProcMap);
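
Both branches of this hunk end in the same call: rank 0 fills molToProcMap and broadcasts it, and every other rank receives into an identically sized vector. A minimal sketch of the pattern, with a stand-in round-robin fill in place of the weighted assignment loop shown earlier:

    #include <mpi.h>
    #include <vector>

    // Illustrative only: distribute a rank-0 assignment table to all ranks.
    void shareAssignments(std::vector<int>& molToProcMap, int nGlobalMols) {
      int nProcessors = MPI::COMM_WORLD.Get_size();
      int rank        = MPI::COMM_WORLD.Get_rank();

      molToProcMap.assign(nGlobalMols, -1);      // -1 flags "unassigned"

      if (rank == 0) {
        for (int i = 0; i < nGlobalMols; i++)
          molToProcMap[i] = i % nProcessors;     // stand-in for the real assignment
      }

      // Every rank pre-sizes the vector; Bcast then fills the same buffer everywhere.
      MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
    }
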
# Line 850 | Line 853 | namespace OpenMD {
853      // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
854      // docs said we could.
855      std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
856 <    MPI_Allreduce(&globalGroupMembership[0], &tmpGroupMembership[0], nGlobalAtoms,
857 <                  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
856 >    MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
857 >                              &tmpGroupMembership[0], nGlobalAtoms,
858 >                              MPI::INT, MPI::SUM);
859      info->setGlobalGroupMembership(tmpGroupMembership);
860   #else
861      info->setGlobalGroupMembership(globalGroupMembership);
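
In this hunk each rank has filled globalGroupMembership only for the atoms it owns and left every other entry at zero, so an element-wise MPI::SUM Allreduce reconstructs the complete table on all ranks; the scratch vector exists because, as the comment notes, MPI_IN_PLACE was not being relied on. A sketch of the idiom with a hypothetical helper name:

    #include <mpi.h>
    #include <vector>

    // Illustrative only: each rank contributes its own entries (others are 0),
    // and the element-wise sum yields the full table everywhere.
    std::vector<int> mergeMembership(const std::vector<int>& partial) {
      int n = partial.size();
      std::vector<int> merged(n, 0);

      MPI::COMM_WORLD.Allreduce(&partial[0], &merged[0], n, MPI::INT, MPI::SUM);

      // With an MPI that honors MPI_IN_PLACE (the comment above notes the MPI-2
      // docs promise it), the reduction could run directly in the source buffer
      // and the scratch vector would be unnecessary.
      return merged;
    }
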
# Line 868 | Line 872 | namespace OpenMD {
872      
873   #ifdef IS_MPI
874      std::vector<int> tmpMolMembership(info->getNGlobalAtoms(), 0);
875 +    MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
876 +                              nGlobalAtoms,
877 +                              MPI::INT, MPI::SUM);
878      
872    MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], nGlobalAtoms,
873                  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
874    
879      info->setGlobalMolMembership(tmpMolMembership);
880   #else
881      info->setGlobalMolMembership(globalMolMembership);
# Line 887 | Line 891 | namespace OpenMD {
891      
892   #ifdef IS_MPI
893      std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
894 <    MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
895 <                  info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
894 >    MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
895 >                              info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
896   #else
897      std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
898   #endif    
# Line 918 | Line 922 | namespace OpenMD {
922    }
923    
924    void SimCreator::loadCoordinates(SimInfo* info, const std::string& mdFileName) {
921    Globals* simParams;
925  
923    simParams = info->getSimParams();
924    
926      DumpReader reader(info, mdFileName);
927      int nframes = reader.getNFrames();
928  

Diff Legend

  Removed lines
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)