@@ 105 @@
 
     if (worldRank == masterNode) {
       MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
-      // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
 #endif
       SimplePreprocessor preprocessor;
       preprocessor.preprocess(rawMetaDataStream, filename,
@@ 114 @@
       //broadcasting the stream size
       streamSize = ppStream.str().size() +1;
       MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
       MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
                 streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
-
-      // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
-      // MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
-      //                       streamSize, MPI::CHAR, masterNode);
-
     } else {
 
       MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
-      // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
 
       //get stream size
       MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
-      // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
       char* buf = new char[streamSize];
       assert(buf);
 
       //receive file content
       MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
-      // MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
 
       ppStream.str(buf);
       delete [] buf;
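
Note: the pattern in this hunk is a length-prefixed broadcast of the preprocessed
metadata. The master broadcasts the stream size (including the NUL terminator),
then the character buffer; the other ranks allocate a buffer of that size and
rebuild their own stream from it. A minimal standalone sketch of the same idiom,
assuming nothing beyond the MPI C API (the payload string here is invented):

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>
    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int worldRank;
      MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
      const int masterNode = 0;

      std::stringstream ppStream;
      int streamSize;

      if (worldRank == masterNode) {
        ppStream.str("molecule { ... }");        // stands in for the preprocessed file
        streamSize = ppStream.str().size() + 1;  // +1 so the NUL terminator travels too
        MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
        MPI_Bcast(const_cast<char*>(ppStream.str().c_str()),
                  streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
      } else {
        MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
        std::vector<char> buf(streamSize);       // vector instead of new[]/delete[]
        MPI_Bcast(buf.data(), streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
        ppStream.str(buf.data());                // buffer is NUL-terminated
      }

      std::cout << "rank " << worldRank << ": " << ppStream.str() << "\n";
      MPI_Finalize();
      return 0;
    }

Broadcasting the size first is what lets the receiving ranks size their buffer
before the second broadcast arrives; the std::vector also removes the manual
delete [] that the original code needs.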
@@ 522 @@
     // condition:
 
     MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);
-    //nProcessors = MPI::COMM_WORLD.Get_size();
 
     if (nProcessors > nGlobalMols) {
       sprintf(painCave.errMsg,
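
The error branch that follows guards the decomposition: with more MPI ranks than
molecules, some ranks would receive no work. A minimal sketch of the same guard,
assuming only the MPI C API (the message text and molecule count are illustrative,
not the original painCave error):

    #include <cstdio>
    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int nProcessors;
      MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);

      const int nGlobalMols = 4;  // illustrative; really comes from the parsed system
      if (nProcessors > nGlobalMols) {
        std::fprintf(stderr, "%d processors requested for only %d molecules\n",
                     nProcessors, nGlobalMols);
        MPI_Abort(MPI_COMM_WORLD, 1);  // stand-in for the painCave error handling
      }

      MPI_Finalize();
      return 0;
    }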
@@ 641 @@
 
       // Spray out this nonsense to all other processors:
       MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
-      // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
+
     } else {
 
       // Listen to your marching orders from processor 0:
       MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
-      // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
 
     }
 
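
Every comment deleted in these hunks is a call through the MPI C++ bindings,
which were deprecated in MPI-2.2 and removed in MPI-3.0; the surviving calls are
their C-API equivalents. The mapping is mechanical: the communicator object
becomes the last argument and MPI::INT becomes MPI_INT. A runnable sketch of the
molToProcMap broadcast under that mapping (size and contents invented):

    #include <cstdio>
    #include <vector>
    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      const int nGlobalMols = 8;  // illustrative molecule count
      std::vector<int> molToProcMap(nGlobalMols, 0);
      if (rank == 0)
        for (int i = 0; i < nGlobalMols; ++i)
          molToProcMap[i] = i % 2;  // made-up assignment on the root

      // was: MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
      MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);

      std::printf("rank %d sees molToProcMap[1] = %d\n", rank, molToProcMap[1]);
      MPI_Finalize();
      return 0;
    }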
@@ 922 @@
     MPI_Allreduce(&globalGroupMembership[0],
                   &tmpGroupMembership[0], nGlobalAtoms,
                   MPI_INT, MPI_SUM, MPI_COMM_WORLD);
-    // MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
-    //                           &tmpGroupMembership[0], nGlobalAtoms,
-    //                           MPI::INT, MPI::SUM);
+
     info->setGlobalGroupMembership(tmpGroupMembership);
 #else
     info->setGlobalGroupMembership(globalGroupMembership);
@@ 949 @@
     MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
                   nGlobalAtoms + nGlobalRigidBodies,
                   MPI_INT, MPI_SUM, MPI_COMM_WORLD);
-    // MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
-    //                           nGlobalAtoms + nGlobalRigidBodies,
-    //                           MPI::INT, MPI::SUM);
 
     info->setGlobalMolMembership(tmpMolMembership);
 #else
@@ 968 @@
     std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
     MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
                   info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
-    // MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
-    //                           info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
 #else
     std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
 #endif
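
The three Allreduce hunks share one idiom: each rank fills only the entries of a
global-length table that it owns, leaves the rest zero, and a SUM reduction
assembles the complete table on every rank. A standalone sketch of that idiom,
assuming only the MPI C API (sizes and values invented):

    #include <cstdio>
    #include <vector>
    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      const int nGlobal = 6;  // illustrative global object count
      // Partial table: nonzero only for the entries this rank owns, so the
      // SUM reduction merges the per-rank pieces without double counting.
      std::vector<int> local(nGlobal, 0), global(nGlobal, 0);
      for (int i = rank; i < nGlobal; i += size)
        local[i] = rank + 1;  // made-up per-entry value

      MPI_Allreduce(&local[0], &global[0], nGlobal,
                    MPI_INT, MPI_SUM, MPI_COMM_WORLD);

      if (rank == 0)
        for (int i = 0; i < nGlobal; ++i)
          std::printf("global[%d] = %d\n", i, global[i]);

      MPI_Finalize();
      return 0;
    }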