 42 |  #include "math/ParallelRandNumGen.hpp"
 43 |  #ifdef IS_MPI
 44 |  #include <mpi.h>
 45 +  #endif
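Together with the removal of the trailing #endif at the very end of this diff, this hunk narrows the IS_MPI guard from the whole translation unit to just the MPI-specific lines, so the same source file compiles in both serial and parallel builds. A minimal sketch of the resulting pattern, using a hypothetical helper that is not part of OOPSE:

#ifdef IS_MPI
#include <mpi.h>
#endif

// Hypothetical helper, for illustration only: the MPI call is guarded and a
// serial fallback is supplied inline, so no whole-file guard is needed.
static int localRank() {
  int rank = 0;
#ifdef IS_MPI
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
  return rank;
}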
 46 |
 47 |  namespace oopse {
 48 |
 48 -
 49 -
 49 |    int ParallelRandNumGen::nCreatedRNG_ = 0;
 50 |
 51 <    ParallelRandNumGen::ParallelRandNumGen( const uint32& oneSeed) {
 51 >    ParallelRandNumGen::ParallelRandNumGen(const uint32& oneSeed) {
 52 |
 53 |      const int masterNode = 0;
 54 |      int seed = oneSeed;
 55 +  #ifdef IS_MPI
 56 |      MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
 57 <
 57 >  #endif
 58 |      if (seed != oneSeed) {
 59 |        sprintf(painCave.errMsg,
 60 |                "Using different seed to initialize ParallelRandNumGen.\n");
 63 |      }
 64 |
 65 |      int nProcessors;
 66 +  #ifdef IS_MPI
 67 |      MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
 68 |      MPI_Comm_rank( MPI_COMM_WORLD, &myRank_);
 69 <      //In order to generate independent random number stream, the actual seed used by random
 70 <      //number generator is the seed passed to the constructor plus the number of random number
 71 <      //generators which are already created.
 69 >  #else
 70 >      nProcessors = 1;
 71 >      myRank_ = 0;
 72 >  #endif
 73 >      //In order to generate independent random number stream, the
 74 >      //actual seed used by random number generator is the seed passed
 75 >      //to the constructor plus the number of random number generators
 76 >      //which are already created.
 77 |      int newSeed = oneSeed + nCreatedRNG_;
 78 |      mtRand_ = new MTRand(newSeed, nProcessors, myRank_);
 79 <
 79 >
 80 |      ++nCreatedRNG_;
 81 |    }
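The comment added above describes the intent: every ParallelRandNumGen created on a process shifts the base seed by nCreatedRNG_, while the (newSeed, nProcessors, myRank_) constructor presumably partitions the underlying Mersenne Twister sequence by rank. The following is only a rough sketch of that leapfrog idea using std::mt19937; LeapfrogRNG and its members are hypothetical and are not OOPSE's MTRand.

#include <cstdint>
#include <random>

// Sketch only: rank r of nStrides consumers seeds the same engine but keeps
// every nStrides-th draw starting at offset r, so the ranks see disjoint
// subsequences of one long stream.
class LeapfrogRNG {
public:
  LeapfrogRNG(std::uint32_t seed, int nStrides, int stride)
    : engine_(seed), nStrides_(nStrides) {
    engine_.discard(stride);              // skip to this rank's first draw
  }

  std::uint32_t next() {
    std::uint32_t value = static_cast<std::uint32_t>(engine_());
    engine_.discard(nStrides_ - 1);       // jump over the other ranks' draws
    return value;
  }

private:
  std::mt19937 engine_;
  int nStrides_;
};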
 82 |
 85 |      std::vector<uint32> bigSeed;
 86 |      const int masterNode = 0;
 87 |      int nProcessors;
 88 +  #ifdef IS_MPI
 89 |      MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
 90 |      MPI_Comm_rank( MPI_COMM_WORLD, &myRank_);
 91 +  #else
 92 +      nProcessors = 1;
 93 +      myRank_ = 0;
 94 +  #endif
 95 |      mtRand_ = new MTRand(nProcessors, myRank_);
 96 |
 97 <      seed(); /** @todo calling virtual function in constructor is not a good design */
 97 >      seed(); /** @todo calling virtual function in constructor is
 98 >                  not a good design */
 99 |    }
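The @todo carried over above points at a genuine C++ pitfall: while a constructor runs, virtual calls are dispatched to the constructor's own class, so a derived override of seed() would not be picked up. A standalone illustration (types hypothetical, unrelated to OOPSE):

#include <iostream>

struct Base {
  Base() { init(); }          // resolves to Base::init(), even when constructing a Derived
  virtual ~Base() = default;
  virtual void init() { std::cout << "Base::init\n"; }
};

struct Derived : Base {
  void init() override { std::cout << "Derived::init\n"; }
};

int main() {
  Derived d;                  // prints "Base::init"
}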
100 |
101 |
103 |
104 |      const int masterNode = 0;
105 |      int seed = oneSeed;
106 +  #ifdef IS_MPI
107 |      MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
108 <
108 >  #endif
109 |      if (seed != oneSeed) {
110 |        sprintf(painCave.errMsg,
111 |                "Using different seed to initialize ParallelRandNumGen.\n");
112 |        painCave.isFatal = 1;;
113 |        simError();
114 |      }
115 <
115 >
116 |      int newSeed = oneSeed +nCreatedRNG_;
117 |      mtRand_->seed(newSeed);
118 <
118 >
119 |      ++nCreatedRNG_;
120 |    }
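Both here and in the constructor, the seed supplied on the master node is broadcast so that every rank derives its stream from the same value. A minimal, self-contained sketch of that pattern (names and the seed value are placeholders); note that the MPI datatype passed to MPI_Bcast should match the C type of the buffer, e.g. MPI_INT for an int:

#ifdef IS_MPI
#include <mpi.h>
#endif
#include <cstdio>

int main(int argc, char** argv) {
  int rank = 0;
#ifdef IS_MPI
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif

  int seed = (rank == 0) ? 12345 : 0;   // only the master starts with the real value
#ifdef IS_MPI
  // Collective call: afterwards every rank holds rank 0's value.
  MPI_Bcast(&seed, 1, MPI_INT, 0, MPI_COMM_WORLD);
#endif

  std::printf("rank %d uses seed %d\n", rank, seed);
#ifdef IS_MPI
  MPI_Finalize();
#endif
  return 0;
}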
121 |
124 |      std::vector<uint32> bigSeed;
125 |      int size;
126 |      const int masterNode = 0;
127 +  #ifdef IS_MPI
128 |      if (worldRank == masterNode) {
129 +  #endif
130 +
131 |        bigSeed = mtRand_->generateSeeds();
132 |        size = bigSeed.size();
133 +
134 +  #ifdef IS_MPI
135 |        MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
136 |        MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
119 -
137 |      }else {
138 |        MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
139 |        bigSeed.resize(size);
140 |        MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
141 |      }
142 +  #endif
143 |
144 |      if (bigSeed.size() == 1) {
145 |        mtRand_->seed(bigSeed[0]);
148 |      }
149 |
150 |      ++nCreatedRNG_;
151 <    }
134 <
135 <
151 >    }
152 |  }
137 -
138 -  #endif
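The body of seed() above shares the generated seed vector with a two-step collective: the master broadcasts the element count first, the other ranks resize their buffers, and then the data itself is broadcast. A hedged sketch of the same pattern as a standalone helper (the function name and parameters are hypothetical):

#include <mpi.h>
#include <vector>

// Collective: every rank in comm must call this. Only the root's input
// contents matter; on return every rank holds the root's seeds.
std::vector<unsigned long> broadcastSeeds(std::vector<unsigned long> seeds,
                                          int root, MPI_Comm comm) {
  int rank;
  MPI_Comm_rank(comm, &rank);

  int size = (rank == root) ? static_cast<int>(seeds.size()) : 0;
  MPI_Bcast(&size, 1, MPI_INT, root, comm);        // step 1: agree on the length

  if (rank != root)
    seeds.resize(size);                            // make room before receiving

  MPI_Bcast(seeds.data(), size, MPI_UNSIGNED_LONG, root, comm);  // step 2: the payload
  return seeds;
}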