root/OpenMD/trunk/src/math/ParallelRandNumGen.cpp

Comparing trunk/src/math/ParallelRandNumGen.cpp (file contents):
Revision 1796 by gezelter, Mon Sep 10 18:38:44 2012 UTC vs.
Revision 1969 by gezelter, Wed Feb 26 14:14:50 2014 UTC

# Line 35 | Line 35
35   *                                                                      
36   * [1]  Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).            
37   * [2]  Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).          
38 < * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).          
38 > * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008).          
39   * [4]  Kuang & Gezelter,  J. Chem. Phys. 133, 164101 (2010).
40   * [5]  Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
41   */
42
43 #include "math/ParallelRandNumGen.hpp"
42   #ifdef IS_MPI
43   #include <mpi.h>
44   #endif
45  
46 + #include "math/ParallelRandNumGen.hpp"
47 +
48   namespace OpenMD {
49  
50    int ParallelRandNumGen::nCreatedRNG_ = 0;
# Line 55 | Line 55 | namespace OpenMD {
55  
56   #ifdef IS_MPI
57      const int masterNode = 0;
58 <    MPI::COMM_WORLD.Bcast(&seed, 1, MPI::UNSIGNED_LONG, masterNode);
58 >    MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
59   #endif
60  
61      if (seed != oneSeed) {
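
The substance of this changeset is a migration from the MPI C++ bindings,
which were deprecated in MPI-2.2 and removed in MPI-3.0, to the plain C API.
The translation is mechanical: the communicator moves from being the invoking
object to the final argument, and MPI:: constants become MPI_ macros. A
minimal, self-contained sketch of the broadcast pattern as it reads after the
change; the surrounding main() is illustrative and not part of OpenMD:

  #include <mpi.h>
  #include <cstdio>

  int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);

    const int masterNode = 0;
    int myRank;
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

    unsigned long seed = 0;
    if (myRank == masterNode) seed = 42UL;   // illustrative seed value

    // C++ binding (removed):
    //   MPI::COMM_WORLD.Bcast(&seed, 1, MPI::UNSIGNED_LONG, masterNode);
    // C binding: the communicator becomes the trailing argument.
    MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);

    std::printf("rank %d now has seed %lu\n", myRank, seed);
    MPI_Finalize();
    return 0;
  }
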
# Line 67 | Line 67 | namespace OpenMD {
67  
68      int nProcessors;
69   #ifdef IS_MPI
70 <    nProcessors = MPI::COMM_WORLD.Get_size();
71 <    myRank_ = MPI::COMM_WORLD.Get_rank();
70 >    MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);
71 >    MPI_Comm_rank( MPI_COMM_WORLD, &myRank_);
72 >
73   #else
74      nProcessors = 1;
75      myRank_ = 0;
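
The size and rank queries follow the same translation: the C++ bindings
returned their results by value, while the C calls fill output parameters and
return an error code (MPI_SUCCESS on success). A brief sketch; the helper
function is hypothetical, as OpenMD performs these calls inline:

  #include <mpi.h>

  // Hypothetical helper mirroring the hunk above.
  void queryWorld(int& nProcessors, int& myRank) {
    // Results arrive through the pointer arguments; the return value
    // (ignored here, as in the diff) is an MPI error code.
    MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

    // Removed C++ equivalents, for comparison:
    //   nProcessors = MPI::COMM_WORLD.Get_size();
    //   myRank      = MPI::COMM_WORLD.Get_rank();
  }
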
# Line 85 | Line 86 | namespace OpenMD {
86  
87    ParallelRandNumGen::ParallelRandNumGen() {
88  
88    std::vector<uint32> bigSeed;
89      int nProcessors;
90   #ifdef IS_MPI
91 <    nProcessors = MPI::COMM_WORLD.Get_size();
92 <    myRank_ = MPI::COMM_WORLD.Get_rank();
91 >    MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);
92 >    MPI_Comm_rank( MPI_COMM_WORLD, &myRank_);
93   #else
94      nProcessors = 1;
95      myRank_ = 0;
# Line 106 | Line 106 | namespace OpenMD {
106      unsigned long seed = oneSeed;
107   #ifdef IS_MPI
108      const int masterNode = 0;
109 <    MPI::COMM_WORLD.Bcast(&seed, 1, MPI::UNSIGNED_LONG, masterNode);
109 >    MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
110   #endif
111      if (seed != oneSeed) {
112        sprintf(painCave.errMsg,
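
This hunk also preserves the original cross-rank consistency check: every
rank overwrites its local copy of the seed with the master's broadcast value,
so a surviving mismatch means the caller supplied different seeds on
different ranks. A hedged sketch of that pattern, with reportError() as a
hypothetical stand-in for OpenMD's painCave error machinery:

  #include <mpi.h>
  #include <cstdio>

  // Hypothetical stand-in for OpenMD's painCave error reporting.
  void reportError(const char* msg) { std::fprintf(stderr, "%s\n", msg); }

  void checkSeedConsistency(unsigned long oneSeed) {
    const int masterNode = 0;
    unsigned long seed = oneSeed;

    // After the broadcast, every rank holds the master's seed...
    MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);

    // ...so a mismatch means this rank was invoked with a different
    // seed than the master was.
    if (seed != oneSeed)
      reportError("Using the seed from the master node");
  }
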
# Line 124 | Line 124 | namespace OpenMD {
124    void ParallelRandNumGen::seed() {
125  
126      std::vector<uint32> bigSeed;
127    int size;
127  
128   #ifdef IS_MPI
129 +    int size;
130      const int masterNode = 0;
131      if (worldRank == masterNode) {
132   #endif
133  
134        bigSeed = mtRand_->generateSeeds();
135      size = bigSeed.size();
135  
136   #ifdef IS_MPI
137 <      MPI::COMM_WORLD.Bcast(&size, 1, MPI::INT, masterNode);
138 <      MPI::COMM_WORLD.Bcast(&bigSeed[0], size, MPI::UNSIGNED_LONG, masterNode);
137 >      size = bigSeed.size();
138 >      MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
139 >      MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode,
140 >                MPI_COMM_WORLD);
141      }else {
142 <      MPI::COMM_WORLD.Bcast(&size, 1, MPI::INT, masterNode);
142 >      MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
143        bigSeed.resize(size);
144 <      MPI::COMM_WORLD.Bcast(&bigSeed[0], size, MPI::UNSIGNED_LONG, masterNode);
144 >      MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode,
145 >                MPI_COMM_WORLD);
146      }
147   #endif
148      

Diff Legend

Removed lines (shown with the old revision's line number only)
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)