236 |
237 |     // Spray out this nonsense to all other processors:
238 |
239 | +   //std::cerr << "node 0 mol2proc = \n";
240 | +   //for (i = 0; i < parallelData->nMolGlobal; i++)
241 | +   //  std::cerr << i << "\t" << MolToProcMap[i] << "\n";
242 | +
243 |     MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
244 |               MPI_INT, 0, MPI_COMM_WORLD);
245 |
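For reference, the MPI_Bcast above is the usual way to hand a decomposition table computed on rank 0 to every other rank: all ranks make the identical call, rank 0 supplies the data, and the rest receive it. The sketch below is a minimal, self-contained version of that pattern only; the round-robin assignment and the nMolGlobal value are placeholders, not the load-balancing logic the real code uses.

    // Minimal sketch of the broadcast pattern: rank 0 fills a
    // molecule-to-processor table, then every rank receives the same copy.
    // The round-robin assignment is only a stand-in for the real
    // load-balancing scheme; nMolGlobal is an arbitrary example size.
    #include <mpi.h>
    #include <iostream>
    #include <vector>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);

      int rank, nProcs;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &nProcs);

      const int nMolGlobal = 12;                  // example value only
      std::vector<int> molToProcMap(nMolGlobal);

      if (rank == 0) {
        // Placeholder decomposition: deal molecules out round-robin.
        for (int i = 0; i < nMolGlobal; i++)
          molToProcMap[i] = i % nProcs;
      }

      // Same call on every rank; data flows from the root (rank 0) to the rest.
      MPI_Bcast(molToProcMap.data(), nMolGlobal, MPI_INT, 0, MPI_COMM_WORLD);

      if (rank != 0)
        std::cout << "rank " << rank << " sees molecule 0 on proc "
                  << molToProcMap[0] << "\n";

      MPI_Finalize();
      return 0;
    }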
398 |     void mpiSimulation::mpiRefresh( void ){
399 |
400 |       int isError, i;
401 | <     int *globalAtomIndex = new int[parallelData->nAtomsLocal];
402 | <
399 | <     // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
400 | <     // not barf
401 | >     int *localToGlobalAtomIndex = new int[parallelData->nAtomsLocal];
402 | >     int *localToGlobalGroupIndex = new int[parallelData->nGroupsLocal];
403 |
404 | <     for(i=0; i<parallelData->nAtomsLocal; i++) globalAtomIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
404 | >     // Fortran indexing needs to be increased by 1 in order to get the 2
405 | >     // languages to not barf
406 | >
407 | >     for(i = 0; i < parallelData->nAtomsLocal; i++)
408 | >       localToGlobalAtomIndex[i] = globalAtomIndex[i] + 1;
409 | >
410 | >     for(i = 0; i < parallelData->nGroupsLocal; i++)
411 | >       localToGlobalGroupIndex[i] = globalGroupIndex[i] + 1;
412 |
413 |       isError = 0;
414 | <     setFsimParallel( parallelData, &(entryPlug->n_atoms), globalAtomIndex, &isError );
414 | >
415 | >     setFsimParallel( parallelData,
416 | >                      &(parallelData->nAtomsLocal), localToGlobalAtomIndex,
417 | >                      &(parallelData->nGroupsLocal), localToGlobalGroupIndex,
418 | >                      &isError );
419 | >
420 |       if( isError ){
421 |
422 |         sprintf( painCave.errMsg,
425 |         simError();
426 |       }
427 |
428 | <     delete[] globalAtomIndex;
428 | >     delete[] localToGlobalGroupIndex;
429 | >     delete[] localToGlobalAtomIndex;
430 |
431 |
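The +1 in the loops above exists because the C++ side counts atoms and cutoff groups from 0 while the Fortran side that setFsimParallel hands them to expects 1-based indices. A minimal sketch of that convention follows; passIndicesToFortran is a hypothetical stand-in for the Fortran entry point, since only the argument list visible in this diff is known here.

    // Sketch of the 0-based -> 1-based shift applied before crossing the
    // C++/Fortran boundary. passIndicesToFortran is hypothetical: it only
    // prints what a 1-based Fortran routine would receive.
    #include <cstddef>
    #include <iostream>
    #include <vector>

    void passIndicesToFortran(const std::vector<int>& indices) {
      for (int idx : indices)
        std::cout << idx << " ";
      std::cout << "\n";
    }

    int main() {
      // Example 0-based global indices owned by this processor.
      std::vector<int> globalAtomIndex = {0, 5, 9, 42};

      // Fortran arrays are 1-based by default, so shift each index up by one.
      std::vector<int> localToGlobalAtomIndex(globalAtomIndex.size());
      for (std::size_t i = 0; i < globalAtomIndex.size(); i++)
        localToGlobalAtomIndex[i] = globalAtomIndex[i] + 1;

      passIndicesToFortran(localToGlobalAtomIndex);   // prints: 1 6 10 43
      return 0;
    }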
432 |       sprintf( checkPointMsg,