 15      mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
 16      {
 17        entryPlug = the_entryPlug;
 18 <      mpiPlug = new mpiSimData;
 18 >      parallelData = new mpiSimData;
 19
 20 <      MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->nProcessors) );
 21 <      mpiPlug->myNode = worldRank;
 20 >      MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors) );
 21 >      parallelData->myNode = worldRank;
 22
 23        MolToProcMap = new int[entryPlug->n_mol];
 24        MolComponentType = new int[entryPlug->n_mol];
 25        AtomToProcMap = new int[entryPlug->n_atoms];
 26 +      GroupToProcMap = new int[entryPlug->ngroup];
 27
 28        mpiSim = this;
 29        wrapMeSimParallel( this );
 35        delete[] MolToProcMap;
 36        delete[] MolComponentType;
 37        delete[] AtomToProcMap;
 38 +      delete[] GroupToProcMap;
 39
 40 <      delete mpiPlug;
 40 >      delete parallelData;
 41        // perhaps we should let fortran know the party is over.
 42
 43      }
 49        randomSPRNG *myRandom;
 50        int* componentsNmol;
 51        int* AtomsPerProc;
 52 +      int* GroupsPerProc;
 53
 54        double numerator;
 55        double denominator;
 56        double precast;
 57        double x, y, a;
 58        int old_atoms, add_atoms, new_atoms;
 59 +      int old_groups, add_groups, new_groups;
 60
 61        int nTarget;
 62 <      int molIndex, atomIndex;
 62 >      int molIndex, atomIndex, groupIndex;
 63        int done;
 64 <      int i, j, loops, which_proc, nmol_local, natoms_local;
 65 <      int nmol_global, natoms_global;
 64 >      int i, j, loops, which_proc;
 65 >      int nmol_global, nmol_local;
 66 >      int ngroups_global, ngroups_local;
 67 >      int natoms_global, natoms_local;
 68 >      int ncutoff_groups, nAtomsInGroups;
 69        int local_index;
 70        int baseSeed = entryPlug->getSeed();
 71 +      CutoffGroupStamp* cg;
 72
 73        nComponents = entryPlug->nComponents;
 74        compStamps = entryPlug->compStamps;
 75        componentsNmol = entryPlug->componentsNmol;
 76 <      AtomsPerProc = new int[mpiPlug->nProcessors];
 76 >      AtomsPerProc = new int[parallelData->nProcessors];
 77 >      GroupsPerProc = new int[parallelData->nProcessors];
 78
 79 <      mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
 80 <      mpiPlug->nBondsGlobal = entryPlug->n_bonds;
 81 <      mpiPlug->nBendsGlobal = entryPlug->n_bends;
 82 <      mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
 83 <      mpiPlug->nSRIGlobal = entryPlug->n_SRI;
 84 <      mpiPlug->nMolGlobal = entryPlug->n_mol;
 85 <      mpiPlug->nGroupsGlobal = entryPlug->ngroup;
 79 >      parallelData->nAtomsGlobal = entryPlug->n_atoms;
 80 >      parallelData->nBondsGlobal = entryPlug->n_bonds;
 81 >      parallelData->nBendsGlobal = entryPlug->n_bends;
 82 >      parallelData->nTorsionsGlobal = entryPlug->n_torsions;
 83 >      parallelData->nSRIGlobal = entryPlug->n_SRI;
 84 >      parallelData->nGroupsGlobal = entryPlug->ngroup;
 85 >      parallelData->nMolGlobal = entryPlug->n_mol;
 86
 87        myRandom = new randomSPRNG( baseSeed );
 88
 89 <      a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
 89 >      a = 3.0 * (double)parallelData->nMolGlobal / (double)parallelData->nAtomsGlobal;
 90
 91        // Initialize things that we'll send out later:
 92 <      for (i = 0; i < mpiPlug->nProcessors; i++ ) {
 92 >      for (i = 0; i < parallelData->nProcessors; i++ ) {
 93          AtomsPerProc[i] = 0;
 94 +        GroupsPerProc[i] = 0;
 95        }
 96 <      for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
 96 >      for (i = 0; i < parallelData->nMolGlobal; i++ ) {
 97          // default to an error condition:
 98          MolToProcMap[i] = -1;
 99          MolComponentType[i] = -1;
100        }
101 <      for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) {
101 >      for (i = 0; i < parallelData->nAtomsGlobal; i++ ) {
102          // default to an error condition:
103          AtomToProcMap[i] = -1;
104        }
105 +      for (i = 0; i < parallelData->nGroupsGlobal; i++ ) {
106 +        // default to an error condition:
107 +        GroupToProcMap[i] = -1;
108 +      }
109
110 <      if (mpiPlug->myNode == 0) {
110 >      if (parallelData->myNode == 0) {
111          numerator = (double) entryPlug->n_atoms;
112 <        denominator = (double) mpiPlug->nProcessors;
112 >        denominator = (double) parallelData->nProcessors;
113          precast = numerator / denominator;
114          nTarget = (int)( precast + 0.5 );
115
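
At line 114, node 0 rounds n_atoms / nProcessors to the nearest integer to get nTarget, the number of atoms each processor should end up with. A minimal stand-alone sketch of that rounding, using hypothetical counts:

  // Sketch only -- not part of the change above; counts are hypothetical.
  #include <cstdio>

  int main() {
    int n_atoms = 1013;                     // hypothetical global atom count
    int nProcessors = 4;                    // hypothetical processor count
    double precast = (double) n_atoms / (double) nProcessors;
    int nTarget = (int)( precast + 0.5 );   // round to nearest, as at line 114
    printf("nTarget = %d\n", nTarget);      // prints 253
    return 0;
  }
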
123        }
124
125        atomIndex = 0;
126 +      groupIndex = 0;
127
128        for (i = 0; i < molIndex; i++ ) {
129
135
136          // Pick a processor at random
137
138 <        which_proc = (int) (myRandom->getRandom() * mpiPlug->nProcessors);
138 >        which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors);
139
140          // How many atoms does this processor have?
141
143          add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
144          new_atoms = old_atoms + add_atoms;
145
146 +        old_groups = GroupsPerProc[which_proc];
147 +        ncutoff_groups = compStamps[MolComponentType[i]]->getNCutoffGroups();
148 +        nAtomsInGroups = 0;
149 +        for (j = 0; j < ncutoff_groups; j++) {
150 +          cg = compStamps[MolComponentType[i]]->getCutoffGroup(j);
151 +          nAtomsInGroups += cg->getNMembers();
152 +        }
153 +        add_groups = add_atoms - nAtomsInGroups + ncutoff_groups;
154 +        new_groups = old_groups + add_groups;
155 +
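
The added lines 146-154 count the cutoff groups a molecule brings with it: each declared cutoff group counts once, and every atom that is not a member of any declared group is treated as a group of one, which is what add_atoms - nAtomsInGroups + ncutoff_groups computes. A self-contained sketch of that arithmetic with hypothetical counts:

  // Sketch only -- hypothetical molecule with 9 atoms and two 3-atom cutoff groups.
  #include <cstdio>

  int main() {
    int add_atoms = 9;                        // atoms in this molecule
    int ncutoff_groups = 2;                   // declared cutoff groups
    int membersPerGroup[] = { 3, 3 };

    int nAtomsInGroups = 0;
    for (int j = 0; j < ncutoff_groups; j++)
      nAtomsInGroups += membersPerGroup[j];

    // ungrouped atoms each count as their own group, plus the declared groups:
    int add_groups = add_atoms - nAtomsInGroups + ncutoff_groups;
    printf("add_groups = %d\n", add_groups);  // 9 - 6 + 2 = 5
    return 0;
  }
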
156          // If we've been through this loop too many times, we need
157          // to just give up and assign the molecule to this processor
158          // and be done with it.
172            AtomToProcMap[atomIndex] = which_proc;
173            atomIndex++;
174          }
175 +        GroupsPerProc[which_proc] += add_groups;
176 +        for (j=0; j < add_groups; j++) {
177 +          GroupToProcMap[groupIndex] = which_proc;
178 +          groupIndex++;
179 +        }
180          done = 1;
181          continue;
182        }
191            AtomToProcMap[atomIndex] = which_proc;
192            atomIndex++;
193          }
194 +        GroupsPerProc[which_proc] += add_groups;
195 +        for (j=0; j < add_groups; j++) {
196 +          GroupToProcMap[groupIndex] = which_proc;
197 +          groupIndex++;
198 +        }
199          done = 1;
200          continue;
201        }
219            AtomToProcMap[atomIndex] = which_proc;
220            atomIndex++;
221          }
222 +        GroupsPerProc[which_proc] += add_groups;
223 +        for (j=0; j < add_groups; j++) {
224 +          GroupToProcMap[groupIndex] = which_proc;
225 +          groupIndex++;
226 +        }
227          done = 1;
228          continue;
229        } else {
233        }
234      }
235
236 +
237      // Spray out this nonsense to all other processors:
238
239 <    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
239 >    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
240                MPI_INT, 0, MPI_COMM_WORLD);
241
242 <    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
242 >    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
243                MPI_INT, 0, MPI_COMM_WORLD);
244
245 <    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
245 >    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
246                MPI_INT, 0, MPI_COMM_WORLD);
247
248 <    MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
248 >    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
249 >              MPI_INT, 0, MPI_COMM_WORLD);
250 >
251 >    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
252                MPI_INT, 0, MPI_COMM_WORLD);
253 +
254 +    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
255 +              MPI_INT, 0, MPI_COMM_WORLD);
256      } else {
257
258        // Listen to your marching orders from processor 0:
259
260 <      MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
260 >      MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
261                  MPI_INT, 0, MPI_COMM_WORLD);
262
263 <      MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
263 >      MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
264                  MPI_INT, 0, MPI_COMM_WORLD);
265
266 <      MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
266 >      MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
267                  MPI_INT, 0, MPI_COMM_WORLD);
268 +
269 +      MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
270 +                MPI_INT, 0, MPI_COMM_WORLD);
271
272 <      MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
272 >      MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
273                  MPI_INT, 0, MPI_COMM_WORLD);
274
275 +      MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
276 +                MPI_INT, 0, MPI_COMM_WORLD);
277
278 +
279      }
280
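
Both branches above issue the same MPI_Bcast calls: processor 0 sends the maps it just filled in, and every other rank receives them into the same buffers. A minimal sketch of that collective pattern (the array size and contents are hypothetical stand-ins for the real maps):

  // Sketch of the broadcast pattern used above (hypothetical array).
  #include <mpi.h>
  #include <cstdio>

  int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    int map[4];                                      // stand-in for MolToProcMap, etc.
    if (rank == 0)
      for (int i = 0; i < 4; i++) map[i] = i % 2;    // only the root fills the map

    // every rank makes the same call; root 0 sends, all others receive:
    MPI_Bcast(map, 4, MPI_INT, 0, MPI_COMM_WORLD);

    printf("rank %d: map[3] = %d\n", rank, map[3]);
    MPI_Finalize();
    return 0;
  }
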
228 -
281      // Let's all check for sanity:
282
283      nmol_local = 0;
284 <    for (i = 0 ; i < mpiPlug->nMolGlobal; i++ ) {
285 <      if (MolToProcMap[i] == mpiPlug->myNode) {
284 >    for (i = 0 ; i < parallelData->nMolGlobal; i++ ) {
285 >      if (MolToProcMap[i] == parallelData->myNode) {
286          nmol_local++;
287        }
288      }
289
290      natoms_local = 0;
291 <    for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
292 <      if (AtomToProcMap[i] == mpiPlug->myNode) {
291 >    for (i = 0; i < parallelData->nAtomsGlobal; i++) {
292 >      if (AtomToProcMap[i] == parallelData->myNode) {
293          natoms_local++;
294        }
295      }
296
297 +    ngroups_local = 0;
298 +    for (i = 0; i < parallelData->nGroupsGlobal; i++) {
299 +      if (GroupToProcMap[i] == parallelData->myNode) {
300 +        ngroups_local++;
301 +      }
302 +    }
303 +
304      MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
305                    MPI_COMM_WORLD);
306 +
307      MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
308                    MPI_SUM, MPI_COMM_WORLD);
309 +
310 +    MPI_Allreduce(&ngroups_local,&ngroups_global,1,MPI_INT,
311 +                  MPI_SUM, MPI_COMM_WORLD);
312
313      if( nmol_global != entryPlug->n_mol ){
314        sprintf( painCave.errMsg,
328        simError();
329      }
330
331 +    if( ngroups_global != entryPlug->ngroup ){
332 +      sprintf( painCave.errMsg,
333 +               "The sum of all ngroups_local, %d, did not equal the "
334 +               "total number of cutoffGroups, %d.\n",
335 +               ngroups_global, entryPlug->ngroup );
336 +      painCave.isFatal = 1;
337 +      simError();
338 +    }
339 +
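
The sanity check sums each rank's local molecule, atom, and cutoff-group counts with MPI_Allreduce(MPI_SUM), so every rank holds the global totals and can compare them against the expected values. A minimal sketch of that reduction (the per-rank count is hypothetical):

  // Sketch of the MPI_Allreduce sanity-check pattern (hypothetical counts).
  #include <mpi.h>
  #include <cstdio>

  int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    int nlocal = rank + 1;             // hypothetical per-rank count
    int nglobal = 0;
    MPI_Allreduce(&nlocal, &nglobal, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

    int expected = size * (size + 1) / 2;
    if (nglobal != expected && rank == 0)
      printf("mismatch: %d != %d\n", nglobal, expected);

    MPI_Finalize();
    return 0;
  }
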
340      sprintf( checkPointMsg,
341               "Successfully divided the molecules among the processors.\n" );
342      MPIcheckPoint();
343
344 <    mpiPlug->nMolLocal = nmol_local;
345 <    mpiPlug->nAtomsLocal = natoms_local;
344 >    parallelData->nMolLocal = nmol_local;
345 >    parallelData->nAtomsLocal = natoms_local;
346 >    parallelData->nGroupsLocal = ngroups_local;
347
348 <    globalAtomIndex.resize(mpiPlug->nAtomsLocal);
349 <    globalToLocalAtom.resize(mpiPlug->nAtomsGlobal);
348 >    globalAtomIndex.resize(parallelData->nAtomsLocal);
349 >    globalToLocalAtom.resize(parallelData->nAtomsGlobal);
350      local_index = 0;
351 <    for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
352 <      if (AtomToProcMap[i] == mpiPlug->myNode) {
351 >    for (i = 0; i < parallelData->nAtomsGlobal; i++) {
352 >      if (AtomToProcMap[i] == parallelData->myNode) {
353          globalAtomIndex[local_index] = i;
354
355          globalToLocalAtom[i] = local_index;
360        globalToLocalAtom[i] = -1;
361      }
362
363 <    globalMolIndex.resize(mpiPlug->nMolLocal);
364 <    globalToLocalMol.resize(mpiPlug->nMolGlobal);
292 <
363 >    globalGroupIndex.resize(parallelData->nGroupsLocal);
364 >    globalToLocalGroup.resize(parallelData->nGroupsGlobal);
365      local_index = 0;
366 <    for (i = 0; i < mpiPlug->nMolGlobal; i++) {
367 <      if (MolToProcMap[i] == mpiPlug->myNode) {
366 >    for (i = 0; i < parallelData->nGroupsGlobal; i++) {
367 >      if (GroupToProcMap[i] == parallelData->myNode) {
368 >        globalGroupIndex[local_index] = i;
369 >
370 >        globalToLocalGroup[i] = local_index;
371 >        local_index++;
372 >
373 >      }
374 >      else
375 >        globalToLocalGroup[i] = -1;
376 >    }
377 >
378 >    globalMolIndex.resize(parallelData->nMolLocal);
379 >    globalToLocalMol.resize(parallelData->nMolGlobal);
380 >    local_index = 0;
381 >    for (i = 0; i < parallelData->nMolGlobal; i++) {
382 >      if (MolToProcMap[i] == parallelData->myNode) {
383          globalMolIndex[local_index] = i;
384          globalToLocalMol[i] = local_index;
385          local_index++;
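
The loops above build two complementary maps per object type: a packed list of the global indices owned locally (globalAtomIndex, globalGroupIndex, globalMolIndex) and an inverse map from global index to local slot, with -1 marking objects owned by another processor. A small stand-alone sketch of the same idea (the ownership array and rank are hypothetical):

  // Sketch of the global<->local index maps built above (hypothetical data).
  #include <cstdio>
  #include <vector>

  int main() {
    int myNode = 1;                                     // hypothetical rank
    std::vector<int> toProc = {0, 1, 1, 0, 1};          // stand-in for GroupToProcMap

    std::vector<int> globalIndex;                       // local slot -> global index
    std::vector<int> globalToLocal(toProc.size(), -1);  // global index -> local slot (-1 if remote)

    for (int i = 0; i < (int) toProc.size(); i++) {
      if (toProc[i] == myNode) {
        globalToLocal[i] = (int) globalIndex.size();
        globalIndex.push_back(i);
      }
    }

    printf("local objects: %zu, globalToLocal[2] = %d\n",
           globalIndex.size(), globalToLocal[2]);       // 3 local, slot 1
    return 0;
  }
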
394    void mpiSimulation::mpiRefresh( void ){
395
396      int isError, i;
397 <    int *globalIndex = new int[mpiPlug->nAtomsLocal];
397 >    int *globalAtomIndex = new int[parallelData->nAtomsLocal];
398
399      // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
400      // not barf
401
402 <    for(i=0; i<mpiPlug->nAtomsLocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
316 <
402 >    for(i=0; i<parallelData->nAtomsLocal; i++) globalAtomIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
403
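
The loop at line 402 shifts every 0-based C++ global index up by one before the array is handed to the Fortran side, which expects 1-based indices. A tiny sketch of that conversion (the index values are hypothetical):

  // Sketch of the 0-based -> 1-based index shift done above (hypothetical indices).
  #include <cstdio>

  int main() {
    int cxxIndex[3] = {0, 7, 41};            // hypothetical 0-based global indices
    int fortranIndex[3];

    for (int i = 0; i < 3; i++)
      fortranIndex[i] = cxxIndex[i] + 1;     // Fortran arrays start at 1

    printf("%d %d %d\n", fortranIndex[0], fortranIndex[1], fortranIndex[2]);
    return 0;
  }
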
404      isError = 0;
405 <    setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
405 >    setFsimParallel( parallelData, &(entryPlug->n_atoms), globalAtomIndex, &isError );
406      if( isError ){
407
408        sprintf( painCave.errMsg,
411        simError();
412      }
413
414 <    delete[] globalIndex;
414 >    delete[] globalAtomIndex;
415
416 +
417      sprintf( checkPointMsg,
418               " mpiRefresh successful.\n" );
419      MPIcheckPoint();