#ifdef IS_MPI

#include <iostream>

#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>

#include "brains/mpiSimulation.hpp"
#include "utils/simError.h"
#include "UseTheForce/DarkSide/simParallel_interface.h"
#include "math/randomSPRNG.hpp"

mpiSimulation * mpiSim;

mpiSimulation::mpiSimulation( SimInfo *the_entryPlug ) {
  entryPlug = the_entryPlug;
  parallelData = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors));
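  // worldRank is the global MPI rank assigned to this process during
  // startup elsewhere in the code; myNode caches it for this simulation.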
  parallelData->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
}

mpiSimulation::~mpiSimulation() {
  delete [] MolToProcMap;
  delete [] MolComponentType;

  delete parallelData;
  // perhaps we should let fortran know the party is over.
}

void mpiSimulation::divideLabor() {
  int nComponents;
  MoleculeStamp ** compStamps;
  randomSPRNG * myRandom;
  int * componentsNmol;
  int * AtomsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x;
  double y;
  double a;
  int old_atoms;
  int add_atoms;
  int new_atoms;

  int nTarget;
  int molIndex;
  int atomIndex;
  int done;
  int i;
  int j;
  int loops;
  int which_proc;
  int nmol_global, nmol_local;
  int natoms_global, natoms_local;
  int local_index;

  int baseSeed = entryPlug->getSeed();
  CutoffGroupStamp * cg;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[parallelData->nProcessors];

  parallelData->nMolGlobal = entryPlug->n_mol;
  parallelData->nAtomsGlobal = entryPlug->n_atoms;

  if (parallelData->nProcessors > parallelData->nMolGlobal) {
    sprintf(painCave.errMsg,
            "nProcessors (%d) > nMol (%d)\n"
            "\tThe number of processors is larger than\n"
            "\tthe number of molecules.  This will not result in a\n"
            "\tusable division of atoms for force decomposition.\n"
            "\tEither try a smaller number of processors, or run the\n"
            "\tsingle-processor version of OOPSE.\n",
            parallelData->nProcessors, parallelData->nMolGlobal);
    painCave.isFatal = 1;
    simError();
  }

  myRandom = new randomSPRNG(baseSeed);
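
  // a is the decay constant for the Monte-Carlo acceptance probability
  // used below: nAtomsGlobal / nMolGlobal is the average number of atoms
  // per molecule, so this choice corresponds to a penalty factor of 3.0
  // in Pacc(x) = exp(-a * x).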
  a = 3.0 * parallelData->nMolGlobal / parallelData->nAtomsGlobal;

  // Initialize things that we'll send out later:
  for( i = 0; i < parallelData->nProcessors; i++ ) {
    AtomsPerProc[i] = 0;
  }

  for( i = 0; i < parallelData->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
  }

  if (parallelData->myNode == 0) {
    numerator = entryPlug->n_atoms;
    denominator = parallelData->nProcessors;
    precast = numerator / denominator;
    nTarget = (int)(precast + 0.5);
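
    // nTarget is the ideal per-processor atom count: the global atom
    // count divided evenly among the processors and rounded to the
    // nearest integer (e.g. 1000 atoms on 3 processors gives 333).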

    // Build the array of molecule component types first
    molIndex = 0;

    for( i = 0; i < nComponents; i++ ) {
      for( j = 0; j < componentsNmol[i]; j++ ) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    for( i = 0; i < molIndex; i++ ) {
      done = 0;
      loops = 0;

      while (!done) {
        loops++;

        // Pick a processor at random
        which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors);

        // How many atoms does this processor have?
        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.
        if (loops > 100) {
          sprintf(painCave.errMsg,
                  "I've tried 100 times to assign molecule %d to a "
                  "processor, but can't find a good spot.\n"
                  "I'm assigning it at random to processor %d.\n",
                  i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;

          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:
        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;

          done = 1;
          continue;
        }

        // The only situation left is when new_atoms > nTarget.  We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget

        // roughly:  x = new_atoms - nTarget
        //           Pacc(x) = exp(- a * x)
        // where a = penalty / (average atoms per molecule)
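        //
        // For example, with an average of ~3 atoms per molecule, a is
        // about 1: one atom over nTarget is accepted with probability
        // exp(-1) ~ 0.37, while five atoms over drops to exp(-5) ~ 0.007.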

        x = (double)(new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;

          done = 1;
          continue;
        } else {
          continue;
        }
      }
    }

    // Spray out this nonsense to all other processors:
    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal, MPI_INT, 0,
              MPI_COMM_WORLD);
  } else {
    // Listen to your marching orders from processor 0:
    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal, MPI_INT, 0,
              MPI_COMM_WORLD);
  }
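
  // After the broadcast, every rank holds an identical copy of
  // MolToProcMap, so each rank can independently pick out its own
  // molecules below.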

  // Let's all check for sanity:
  nmol_local = 0;

  for( i = 0; i < parallelData->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == parallelData->myNode) {
      nmol_local++;
    }
  }

  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  if (nmol_global != entryPlug->n_mol) {
    sprintf(painCave.errMsg,
            "The sum of all nmol_local, %d, did not equal the "
            "total number of molecules, %d.\n",
            nmol_global, entryPlug->n_mol);
    painCave.isFatal = 1;
    simError();
  }

  sprintf(checkPointMsg,
          "Successfully divided the molecules among the processors.\n");
  MPIcheckPoint();

  parallelData->nMolLocal = nmol_local;

  globalMolIndex.resize(parallelData->nMolLocal);
  local_index = 0;

  for( i = 0; i < parallelData->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == parallelData->myNode) {
      globalMolIndex[local_index] = i;
      local_index++;
    }
  }

  // Scratch storage for the decomposition is no longer needed:
  delete myRandom;
  delete [] AtomsPerProc;
}

void mpiSimulation::mpiRefresh( void ) {
  int isError, i;
  int * localToGlobalAtomIndex = new int[parallelData->nAtomsLocal];
  int * localToGlobalGroupIndex = new int[parallelData->nGroupsLocal];

  // Fortran indexing needs to be increased by 1 in order to get the 2
  // languages to not barf
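  // (C/C++ arrays are 0-based while Fortran arrays are 1-based, so local
  //  entry i must carry globalAtomIndex[i] + 1 rather than the raw index.)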

  for( i = 0; i < parallelData->nAtomsLocal; i++ )
    localToGlobalAtomIndex[i] = globalAtomIndex[i] + 1;

  for( i = 0; i < parallelData->nGroupsLocal; i++ )
    localToGlobalGroupIndex[i] = globalGroupIndex[i] + 1;

  isError = 0;

  setFsimParallel(parallelData, &(parallelData->nAtomsLocal),
                  localToGlobalAtomIndex, &(parallelData->nGroupsLocal),
                  localToGlobalGroupIndex, &isError);

  if (isError) {
    sprintf(painCave.errMsg,
            "mpiRefresh error: fortran didn't like something we gave it.\n");
    painCave.isFatal = 1;
    simError();
  }

  delete [] localToGlobalGroupIndex;
  delete [] localToGlobalAtomIndex;

  sprintf(checkPointMsg, " mpiRefresh successful.\n");
  MPIcheckPoint();
}

#endif // is_mpi