ViewVC Help
View File | Revision Log | Show Annotations | View Changeset | Root Listing
root/OpenMD/branches/development/src/math/ParallelRandNumGen.cpp
(Generate patch)

Comparing:
trunk/src/math/ParallelRandNumGen.cpp (file contents), Revision 1442 by gezelter, Mon May 10 17:28:26 2010 UTC vs.
branches/development/src/math/ParallelRandNumGen.cpp (file contents), Revision 1850 by gezelter, Wed Feb 20 15:39:39 2013 UTC

# Line 35 | Line 35
35   *                                                                      
36   * [1]  Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).            
37   * [2]  Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).          
38 < * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).          
39 < * [4]  Vardeman & Gezelter, in progress (2009).                        
38 > * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008).          
39 > * [4]  Kuang & Gezelter,  J. Chem. Phys. 133, 164101 (2010).
40 > * [5]  Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
41   */
42  
43   #include "math/ParallelRandNumGen.hpp"
# Line 50 | Line 51 | namespace OpenMD {
51  
52    ParallelRandNumGen::ParallelRandNumGen(const uint32& oneSeed) {
53  
53    const int masterNode = 0;
54      unsigned long seed = oneSeed;
55  
56   #ifdef IS_MPI
57 <    MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
57 >    const int masterNode = 0;
58 >    MPI::COMM_WORLD.Bcast(&seed, 1, MPI::UNSIGNED_LONG, masterNode);
59   #endif
60  
61      if (seed != oneSeed) {
# Line 66 | Line 67 | namespace OpenMD {
67  
68      int nProcessors;
69   #ifdef IS_MPI
70 <    MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);
71 <    MPI_Comm_rank( MPI_COMM_WORLD, &myRank_);
70 >    nProcessors = MPI::COMM_WORLD.Get_size();
71 >    myRank_ = MPI::COMM_WORLD.Get_rank();
72   #else
73      nProcessors = 1;
74      myRank_ = 0;
# Line 85 | Line 86 | namespace OpenMD {
86    ParallelRandNumGen::ParallelRandNumGen() {
87  
88      std::vector<uint32> bigSeed;
88    const int masterNode = 0;
89      int nProcessors;
90   #ifdef IS_MPI
91 <    MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);
92 <    MPI_Comm_rank( MPI_COMM_WORLD, &myRank_);
91 >    nProcessors = MPI::COMM_WORLD.Get_size();
92 >    myRank_ = MPI::COMM_WORLD.Get_rank();
93   #else
94      nProcessors = 1;
95      myRank_ = 0;
# Line 103 | Line 103 | namespace OpenMD {
103  
104    void ParallelRandNumGen::seed( const uint32 oneSeed ) {
105  
106    const int masterNode = 0;
106      unsigned long seed = oneSeed;
107   #ifdef IS_MPI
108 <    MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
108 >    const int masterNode = 0;
109 >    MPI::COMM_WORLD.Bcast(&seed, 1, MPI::UNSIGNED_LONG, masterNode);
110   #endif
111      if (seed != oneSeed) {
112        sprintf(painCave.errMsg,
# Line 124 | Line 124 | namespace OpenMD {
124    void ParallelRandNumGen::seed() {
125  
126      std::vector<uint32> bigSeed;
127 +
128 + #ifdef IS_MPI
129      int size;
130      const int masterNode = 0;
129 #ifdef IS_MPI
131      if (worldRank == masterNode) {
132   #endif
133  
134        bigSeed = mtRand_->generateSeeds();
134      size = bigSeed.size();
135  
136   #ifdef IS_MPI
137 <      MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);        
138 <      MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
137 >      size = bigSeed.size();
138 >      MPI::COMM_WORLD.Bcast(&size, 1, MPI::INT, masterNode);
139 >      MPI::COMM_WORLD.Bcast(&bigSeed[0], size, MPI::UNSIGNED_LONG, masterNode);
140      }else {
141 <      MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);        
141 >      MPI::COMM_WORLD.Bcast(&size, 1, MPI::INT, masterNode);
142        bigSeed.resize(size);
143 <      MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
143 >      MPI::COMM_WORLD.Bcast(&bigSeed[0], size, MPI::UNSIGNED_LONG, masterNode);
144      }
145   #endif
146      

Diff Legend

- Removed lines
+ Added lines
< Changed lines
> Changed lines