 #ifdef IS_MPI
-
-#include <cstdlib>
-#include <cstring>
+#include <iostream>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
 #include <mpi.h>
-#include <mpi++.h>

 #include "mpiSimulation.hpp"
 #include "simError.h"
 #include "fortranWrappers.hpp"
+#include "randomSPRNG.hpp"

-
-
-
 mpiSimulation* mpiSim;

 mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
 {
   entryPlug = the_entryPlug;
-  mpiPlug = new mpiSimData;
+  parallelData = new mpiSimData;

-  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
-  mpiPlug->myNode = worldRank;
-
+  MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors) );
+  parallelData->myNode = worldRank;
+
+  MolToProcMap = new int[entryPlug->n_mol];
+  MolComponentType = new int[entryPlug->n_mol];
+  AtomToProcMap = new int[entryPlug->n_atoms];
+  GroupToProcMap = new int[entryPlug->ngroup];
+
   mpiSim = this;
   wrapMeSimParallel( this );
 }

 mpiSimulation::~mpiSimulation(){

-  delete mpiPlug;
+  delete[] MolToProcMap;
+  delete[] MolComponentType;
+  delete[] AtomToProcMap;
+  delete[] GroupToProcMap;
+
+  delete parallelData;
   // perhaps we should let fortran know the party is over.

 }

+void mpiSimulation::divideLabor( ){

-
-int* mpiSimulation::divideLabor( void ){
-
-  int* globalIndex;
-
   int nComponents;
   MoleculeStamp** compStamps;
+  randomSPRNG *myRandom;
   int* componentsNmol;
+  int* AtomsPerProc;
+  int* GroupsPerProc;

   double numerator;
   double denominator;
   double precast;
+  double x, y, a;
+  int old_atoms, add_atoms, new_atoms;
+  int old_groups, add_groups, new_groups;

   int nTarget;
-  int molIndex, atomIndex, compIndex, compStart;
+  int molIndex, atomIndex, groupIndex;
   int done;
-  int nLocal, molLocal;
-  int i, index;
-  int smallDiff, bigDiff;
+  int i, j, loops, which_proc;
+  int nmol_global, nmol_local;
+  int ngroups_global, ngroups_local;
+  int natoms_global, natoms_local;
+  int ncutoff_groups, nAtomsInGroups;
+  int local_index;
+  int baseSeed = entryPlug->getSeed();
+  CutoffGroupStamp* cg;

-  int testSum;
-
   nComponents = entryPlug->nComponents;
   compStamps = entryPlug->compStamps;
   componentsNmol = entryPlug->componentsNmol;
+  AtomsPerProc = new int[parallelData->nProcessors];
+  GroupsPerProc = new int[parallelData->nProcessors];
+
+  parallelData->nAtomsGlobal = entryPlug->n_atoms;
+  parallelData->nBondsGlobal = entryPlug->n_bonds;
+  parallelData->nBendsGlobal = entryPlug->n_bends;
+  parallelData->nTorsionsGlobal = entryPlug->n_torsions;
+  parallelData->nSRIGlobal = entryPlug->n_SRI;
+  parallelData->nGroupsGlobal = entryPlug->ngroup;
+  parallelData->nMolGlobal = entryPlug->n_mol;

-  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
-  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
-  mpiPlug->nBendsGlobal = entryPlug->n_bends;
-  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
-  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
-  mpiPlug->nMolGlobal = entryPlug->n_mol;
+  myRandom = new randomSPRNG( baseSeed );

-  numerator = (double) entryPlug->n_atoms;
-  denominator = (double) mpiPlug->numberProcessors;
-  precast = numerator / denominator;
-  nTarget = (int)( precast + 0.5 );
-
-  molIndex = 0;
-  atomIndex = 0;
-  compIndex = 0;
-  compStart = 0;
-  for( i=0; i<(mpiPlug->numberProcessors-1); i++){
+  a = 3.0 * (double)parallelData->nMolGlobal / (double)parallelData->nAtomsGlobal;
+
+  // Initialize things that we'll send out later:
+  for (i = 0; i < parallelData->nProcessors; i++ ) {
+    AtomsPerProc[i] = 0;
+    GroupsPerProc[i] = 0;
+  }
+  for (i = 0; i < parallelData->nMolGlobal; i++ ) {
+    // default to an error condition:
+    MolToProcMap[i] = -1;
+    MolComponentType[i] = -1;
+  }
+  for (i = 0; i < parallelData->nAtomsGlobal; i++ ) {
+    // default to an error condition:
+    AtomToProcMap[i] = -1;
+  }
+  for (i = 0; i < parallelData->nGroupsGlobal; i++ ) {
+    // default to an error condition:
+    GroupToProcMap[i] = -1;
+  }

-    done = 0;
-    nLocal = 0;
-    molLocal = 0;
+  if (parallelData->myNode == 0) {
+    numerator = (double) entryPlug->n_atoms;
+    denominator = (double) parallelData->nProcessors;
+    precast = numerator / denominator;
+    nTarget = (int)( precast + 0.5 );

-    if( i == mpiPlug->myNode ){
-      mpiPlug->myMolStart = molIndex;
-      mpiPlug->myAtomStart = atomIndex;
+    // Build the array of molecule component types first
+    molIndex = 0;
+    for (i=0; i < nComponents; i++) {
+      for (j=0; j < componentsNmol[i]; j++) {
+        MolComponentType[molIndex] = i;
+        molIndex++;
+      }
     }
+
+    atomIndex = 0;
+    groupIndex = 0;
+
+    for (i = 0; i < molIndex; i++ ) {
+
+      done = 0;
+      loops = 0;
+
+      while( !done ){
+        loops++;
+
+        // Pick a processor at random
+
+        which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors);
+
+        // How many atoms does this processor have?
+
+        old_atoms = AtomsPerProc[which_proc];
+        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
+        new_atoms = old_atoms + add_atoms;
+
+        old_groups = GroupsPerProc[which_proc];
+        ncutoff_groups = compStamps[MolComponentType[i]]->getNCutoffGroups();
+        nAtomsInGroups = 0;
+        for (j = 0; j < ncutoff_groups; j++) {
+          cg = compStamps[MolComponentType[i]]->getCutoffGroup(j);
+          nAtomsInGroups += cg->getNMembers();
+        }
+        add_groups = add_atoms - nAtomsInGroups + ncutoff_groups;
+        new_groups = old_groups + add_groups;
+
+        // If we've been through this loop too many times, we need
+        // to just give up and assign the molecule to this processor
+        // and be done with it.
+
+        if (loops > 100) {
+          sprintf( painCave.errMsg,
+                   "I've tried 100 times to assign molecule %d to a "
+                   " processor, but can't find a good spot.\n"
+                   "I'm assigning it at random to processor %d.\n",
+                   i, which_proc);
+          painCave.isFatal = 0;
+          simError();
+
+          MolToProcMap[i] = which_proc;
+          AtomsPerProc[which_proc] += add_atoms;
+          for (j = 0 ; j < add_atoms; j++ ) {
+            AtomToProcMap[atomIndex] = which_proc;
+            atomIndex++;
+          }
+          GroupsPerProc[which_proc] += add_groups;
+          for (j=0; j < add_groups; j++) {
+            GroupToProcMap[groupIndex] = which_proc;
+            groupIndex++;
+          }
+          done = 1;
+          continue;
+        }

-      while( !done ){
-
-        if( (molIndex-compStart) >= componentsNmol[compIndex] ){
-          compStart = molIndex;
-          compIndex++;
-          continue;
-        }
+        // If we can add this molecule to this processor without sending
+        // it above nTarget, then go ahead and do it:
+
+        if (new_atoms <= nTarget) {
+          MolToProcMap[i] = which_proc;
+          AtomsPerProc[which_proc] += add_atoms;
+          for (j = 0 ; j < add_atoms; j++ ) {
+            AtomToProcMap[atomIndex] = which_proc;
+            atomIndex++;
+          }
+          GroupsPerProc[which_proc] += add_groups;
+          for (j=0; j < add_groups; j++) {
+            GroupToProcMap[groupIndex] = which_proc;
+            groupIndex++;
+          }
+          done = 1;
+          continue;
+        }

-        nLocal += compStamps[compIndex]->getNAtoms();
-        atomIndex += compStamps[compIndex]->getNAtoms();
-        molIndex++;
-        molLocal++;
+
+        // The only situation left is when new_atoms > nTarget.  We
+        // want to accept this with some probability that dies off the
+        // farther we are from nTarget
+
+        // roughly:  x = new_atoms - nTarget
+        //           Pacc(x) = exp(- a * x)
+        // where a = penalty / (average atoms per molecule)
+
+        x = (double) (new_atoms - nTarget);
+        y = myRandom->getRandom();

-        if ( nLocal == nTarget ) done = 1;
-
-        else if( nLocal < nTarget ){
-          smallDiff = nTarget - nLocal;
-        }
-        else if( nLocal > nTarget ){
-          bigDiff = nLocal - nTarget;
-
-          if( bigDiff < smallDiff ) done = 1;
-          else{
-            molIndex--;
-            molLocal--;
-            atomIndex -= compStamps[compIndex]->getNAtoms();
-            nLocal -= compStamps[compIndex]->getNAtoms();
-            done = 1;
-          }
+        if (y < exp(- a * x)) {
+          MolToProcMap[i] = which_proc;
+          AtomsPerProc[which_proc] += add_atoms;
+          for (j = 0 ; j < add_atoms; j++ ) {
+            AtomToProcMap[atomIndex] = which_proc;
+            atomIndex++;
+          }
+          GroupsPerProc[which_proc] += add_groups;
+          for (j=0; j < add_groups; j++) {
+            GroupToProcMap[groupIndex] = which_proc;
+            groupIndex++;
+          }
+          done = 1;
+          continue;
+        } else {
+          continue;
+        }
+
       }
     }
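
The hunk above replaces the old sequential block decomposition with a randomized, Metropolis-style assignment: rank 0 draws a processor at random for each molecule, accepts it outright if the processor stays at or below the nTarget atom count, and otherwise accepts with probability exp(-a*x), where x is the overshoot in atoms and a is the 3.0 penalty divided by the average atoms per molecule. The standalone C++ sketch below only illustrates that acceptance rule; it is not part of the commit, and the uniform01() helper, acceptMolecule() name, and default penalty argument are made up here (the commit hard-codes the 3.0 factor and draws from randomSPRNG).

// Illustrative sketch (not from the commit): the acceptance test used when a
// randomly chosen processor would exceed its target atom count.
#include <cmath>
#include <cstdlib>
#include <iostream>

// Stand-in for randomSPRNG::getRandom(): a uniform deviate in [0,1).
static double uniform01() { return std::rand() / (RAND_MAX + 1.0); }

// Accept a molecule of addAtoms atoms on a processor that already holds
// oldAtoms atoms, given a per-processor target of targetAtoms.
// penalty / avgAtomsPerMol plays the role of "a" above (penalty = 3.0 in the commit).
bool acceptMolecule(int oldAtoms, int addAtoms, int targetAtoms,
                    double avgAtomsPerMol, double penalty = 3.0) {
  int newAtoms = oldAtoms + addAtoms;
  if (newAtoms <= targetAtoms) return true;        // always accept under the target
  double a = penalty / avgAtomsPerMol;             // decay rate of the acceptance
  double x = double(newAtoms - targetAtoms);       // overshoot, in atoms
  return uniform01() < std::exp(-a * x);           // Pacc(x) = exp(-a x)
}

int main() {
  // e.g. 3-atom molecules, a 1000-atom target, and a processor already at 999 atoms:
  // overshoot x = 2, a = 3/3 = 1, so Pacc = exp(-2), about 0.135.
  int accepted = 0, trials = 100000;
  for (int t = 0; t < trials; t++)
    if (acceptMolecule(999, 3, 1000, 3.0)) accepted++;
  std::cout << "acceptance fraction ~ " << double(accepted) / trials << "\n";
  return 0;
}

A larger penalty keeps every processor closer to nTarget but rejects more random draws, which is why the commit also caps the retry loop at 100 attempts before assigning the molecule unconditionally.
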
+
+
+    // Spray out this nonsense to all other processors:
+
+    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
+
+    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
+
+    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
+
+    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
+
+    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);
+
+    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);
+  } else {
+
+    // Listen to your marching orders from processor 0:

-    if( i == mpiPlug->myNode ){
-      mpiPlug->myMolEnd = (molIndex - 1);
-      mpiPlug->myAtomEnd = (atomIndex - 1);
-      mpiPlug->myNlocal = nLocal;
-      mpiPlug->myMol = molLocal;
-    }
+    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);

-    numerator = (double)( entryPlug->n_atoms - atomIndex );
-    denominator = (double)( mpiPlug->numberProcessors - (i+1) );
-    precast = numerator / denominator;
-    nTarget = (int)( precast + 0.5 );
+    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
+
+    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
+
+    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
+
+    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);
+
+    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);
+
+
   }
-
-  if( mpiPlug->myNode == mpiPlug->numberProcessors-1 ){
-    mpiPlug->myMolStart = molIndex;
-    mpiPlug->myAtomStart = atomIndex;

-    nLocal = 0;
-    molLocal = 0;
-    while( compIndex < nComponents ){
-
-      if( (molIndex-compStart) >= componentsNmol[compIndex] ){
-        compStart = molIndex;
-        compIndex++;
-        continue;
-      }
+  // Let's all check for sanity:

-      nLocal += compStamps[compIndex]->getNAtoms();
-      atomIndex += compStamps[compIndex]->getNAtoms();
-      molIndex++;
-      molLocal++;
-    }
-
-    mpiPlug->myMolEnd = (molIndex - 1);
-    mpiPlug->myAtomEnd = (atomIndex - 1);
-    mpiPlug->myNlocal = nLocal;
-    mpiPlug->myMol = molLocal;
+  nmol_local = 0;
+  for (i = 0 ; i < parallelData->nMolGlobal; i++ ) {
+    if (MolToProcMap[i] == parallelData->myNode) {
+      nmol_local++;
+    }
   }

+  natoms_local = 0;
+  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
+    if (AtomToProcMap[i] == parallelData->myNode) {
+      natoms_local++;
+    }
+  }

-  MPI_Allreduce( &nLocal, &testSum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
-
-  if( mpiPlug->myNode == 0 ){
-    if( testSum != entryPlug->n_atoms ){
-      sprintf( painCave.errMsg,
-               "The summ of all nLocals, %d, did not equal the total number of atoms, %d.\n",
-               testSum, entryPlug->n_atoms );
-      painCave.isFatal = 1;
-      simError();
+  ngroups_local = 0;
+  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
+    if (GroupToProcMap[i] == parallelData->myNode) {
+      ngroups_local++;
     }
   }

+  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
+                MPI_COMM_WORLD);
+
+  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
+                MPI_SUM, MPI_COMM_WORLD);
+
+  MPI_Allreduce(&ngroups_local,&ngroups_global,1,MPI_INT,
+                MPI_SUM, MPI_COMM_WORLD);
+
+  if( nmol_global != entryPlug->n_mol ){
+    sprintf( painCave.errMsg,
+             "The sum of all nmol_local, %d, did not equal the "
+             "total number of molecules, %d.\n",
+             nmol_global, entryPlug->n_mol );
+    painCave.isFatal = 1;
+    simError();
+  }
+
+  if( natoms_global != entryPlug->n_atoms ){
+    sprintf( painCave.errMsg,
+             "The sum of all natoms_local, %d, did not equal the "
+             "total number of atoms, %d.\n",
+             natoms_global, entryPlug->n_atoms );
+    painCave.isFatal = 1;
+    simError();
+  }
+
+  if( ngroups_global != entryPlug->ngroup ){
+    sprintf( painCave.errMsg,
+             "The sum of all ngroups_local, %d, did not equal the "
+             "total number of cutoffGroups, %d.\n",
+             ngroups_global, entryPlug->ngroup );
+    painCave.isFatal = 1;
+    simError();
+  }
+
   sprintf( checkPointMsg,
            "Successfully divided the molecules among the processors.\n" );
   MPIcheckPoint();

-  // lets create the identity array
+  parallelData->nMolLocal = nmol_local;
+  parallelData->nAtomsLocal = natoms_local;
+  parallelData->nGroupsLocal = ngroups_local;

-  globalIndex = new int[mpiPlug->myNlocal];
-  index = mpiPlug->myAtomStart;
-  for( i=0; i<mpiPlug->myNlocal; i++){
-    globalIndex[i] = index;
-    index++;
+  globalAtomIndex.resize(parallelData->nAtomsLocal);
+  globalToLocalAtom.resize(parallelData->nAtomsGlobal);
+  local_index = 0;
+  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
+    if (AtomToProcMap[i] == parallelData->myNode) {
+      globalAtomIndex[local_index] = i;
+
+      globalToLocalAtom[i] = local_index;
+      local_index++;
+
+    }
+    else
+      globalToLocalAtom[i] = -1;
   }

-  return globalIndex;
+  globalGroupIndex.resize(parallelData->nGroupsLocal);
+  globalToLocalGroup.resize(parallelData->nGroupsGlobal);
+  local_index = 0;
+  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
+    if (GroupToProcMap[i] == parallelData->myNode) {
+      globalGroupIndex[local_index] = i;
+
+      globalToLocalGroup[i] = local_index;
+      local_index++;
+
+    }
+    else
+      globalToLocalGroup[i] = -1;
+  }
+
+  globalMolIndex.resize(parallelData->nMolLocal);
+  globalToLocalMol.resize(parallelData->nMolGlobal);
+  local_index = 0;
+  for (i = 0; i < parallelData->nMolGlobal; i++) {
+    if (MolToProcMap[i] == parallelData->myNode) {
+      globalMolIndex[local_index] = i;
+      globalToLocalMol[i] = local_index;
+      local_index++;
+    }
+    else
+      globalToLocalMol[i] = -1;
+  }
+
 }
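
The new bookkeeping above leaves every rank with two complementary maps per object type: globalAtomIndex (local slot to global index) and globalToLocalAtom (global index to local slot, or -1 for atoms owned elsewhere), with the same pattern repeated for cutoff groups and molecules. The self-contained sketch below, which is not part of the commit, shows how such a pair is built and consulted; the small atomToProc array and myNode value are invented for illustration, and push_back stands in for the resize-then-fill pattern used in the real code.

// Illustrative sketch (not from the commit): building and using the paired
// global<->local index maps that divideLabor() fills in above.
#include <iostream>
#include <vector>

int main() {
  // Suppose 8 global atoms scattered over 2 processors (hypothetical map):
  std::vector<int> atomToProc = {0, 1, 1, 0, 0, 1, 0, 1};
  int myNode = 0;

  std::vector<int> globalAtomIndex;                            // local -> global
  std::vector<int> globalToLocalAtom(atomToProc.size(), -1);   // global -> local, -1 = not mine

  for (int g = 0; g < (int)atomToProc.size(); g++) {
    if (atomToProc[g] == myNode) {
      globalToLocalAtom[g] = (int)globalAtomIndex.size();
      globalAtomIndex.push_back(g);
    }
  }

  // Ownership test and translation in both directions:
  for (int g = 0; g < (int)atomToProc.size(); g++) {
    int l = globalToLocalAtom[g];
    if (l >= 0)
      std::cout << "global atom " << g << " is local index " << l
                << " (round trip: " << globalAtomIndex[l] << ")\n";
    else
      std::cout << "global atom " << g << " lives on another processor\n";
  }
  return 0;
}
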


 void mpiSimulation::mpiRefresh( void ){

   int isError, i;
-  int *globalIndex = new int[mpiPlug->myNlocal];
+  int *globalAtomIndex = new int[parallelData->nAtomsLocal];

-  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
+  // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
+  // not barf

+  for(i=0; i<parallelData->nAtomsLocal; i++) globalAtomIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;

   isError = 0;
-  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
+  setFsimParallel( parallelData, &(entryPlug->n_atoms), globalAtomIndex, &isError );
   if( isError ){

     sprintf( painCave.errMsg,
     simError();
   }

-  delete[] globalIndex;
+  delete[] globalAtomIndex;

+
   sprintf( checkPointMsg,
            " mpiRefresh successful.\n" );
   MPIcheckPoint();