220
221       MPI_Bcast(read_buffer, BUFFERSIZE, MPI_CHAR, 0, MPI_COMM_WORLD);
222
223 -     cerr << "node " << worldRank << " finished MPI_Bcast" << endl;
223
224       parseErr = parseCommentLine( read_buffer, simnfo);
225
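(For context: the hunk above and the matching one near line 300 bracket the same collective step, so here is a minimal, self-contained sketch of the broadcast-and-parse pattern the removed debug output was tracing. BUFFERSIZE, the buffer contents, and the parsing step are stand-ins, not copies of the project's own declarations.)

// Illustrative only: rank 0 fills the buffer, MPI_Bcast gives every rank an
// identical copy, then each rank parses it (parseCommentLine in the real code).
#include <mpi.h>
#include <cstdio>
#include <iostream>

const int BUFFERSIZE = 2000;   // assumed size; the real constant lives elsewhere

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);

  int worldRank;
  MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);

  char read_buffer[BUFFERSIZE] = {0};
  if (worldRank == 0) {
    // Only the root reads the file; a literal stands in for fgets() here.
    std::snprintf(read_buffer, BUFFERSIZE, "comment line read on rank 0");
  }

  // Collective call: every rank must reach it, with rank 0 as the root.
  MPI_Bcast(read_buffer, BUFFERSIZE, MPI_CHAR, 0, MPI_COMM_WORLD);

  std::cout << "rank " << worldRank << " parses: " << read_buffer << std::endl;

  MPI_Finalize();
  return 0;
}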
269
270       MPI_Recv(&nCurObj, 1, MPI_INT, which_node,
271                TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);
272 <     cerr << "node " << worldRank << " finished MPI_Send" << endl;
272 >
273       for(j=0; j < nCurObj; j++){
274
275         eof_test = fgets(read_buffer, sizeof(read_buffer), c_in_file);
287
288         MPI_Send(read_buffer, BUFFERSIZE, MPI_CHAR, which_node,
289                  TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD);
290 <       cerr << "node " << worldRank << " finished MPI_Send" << endl;
290 >
291       }
292
293     }
300
301       MPI_Bcast(read_buffer, BUFFERSIZE, MPI_CHAR, 0, MPI_COMM_WORLD);
302
304 -     cerr << "node " << worldRank << " finished MPI_Bcast" << endl;
303       parseErr = parseCommentLine( read_buffer, simnfo);
304
305       if( parseErr != NULL ){
329       MPI_Send(&nCurObj, 1, MPI_INT, 0,
330                TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
331
334 -     cerr << "node " << worldRank << " finished MPI_Send" << endl;
332       for(j = 0; j < integrableObjects.size(); j++){
333
334         MPI_Recv(read_buffer, BUFFERSIZE, MPI_CHAR, 0,
335                  TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD, &istatus);
336
340 -       cerr << "node " << worldRank << " finished MPI_Recv" << endl;
337         parseErr = parseDumpLine(read_buffer, integrableObjects[j]);
338
339         if( parseErr != NULL ){
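The hunk around lines 269-293 is the rank-0 side of the dump exchange and the hunk around 329-339 is the remote side; taken together they form a count-then-lines handshake. The sketch below shows both halves in one toy program, assuming BUFFERSIZE and the TAKE_THIS_TAG_* values are as defined in the project headers (the literal values here are placeholders), with string literals standing in for fgets() and parseDumpLine().

// Illustrative only: the count-then-lines handshake between rank 0 and one
// remote node.  Tag values and BUFFERSIZE are assumptions.
#include <mpi.h>
#include <cstring>
#include <string>
#include <vector>

const int BUFFERSIZE         = 2000;
const int TAKE_THIS_TAG_INT  = 0;
const int TAKE_THIS_TAG_CHAR = 1;

// Rank 0: learn how many records which_node needs, then ship them one per message.
void masterSide(int which_node, const std::vector<std::string>& lines) {
  MPI_Status istatus;
  int nCurObj;
  MPI_Recv(&nCurObj, 1, MPI_INT, which_node,
           TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);

  char read_buffer[BUFFERSIZE];
  for (int j = 0; j < nCurObj; j++) {
    std::strncpy(read_buffer, lines[j].c_str(), BUFFERSIZE - 1);  // stands in for fgets()
    read_buffer[BUFFERSIZE - 1] = '\0';
    MPI_Send(read_buffer, BUFFERSIZE, MPI_CHAR, which_node,
             TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD);
  }
}

// Any other rank: announce its count, then receive and parse that many lines.
void workerSide(int nLocalObjects) {
  MPI_Status istatus;
  int nCurObj = nLocalObjects;
  MPI_Send(&nCurObj, 1, MPI_INT, 0, TAKE_THIS_TAG_INT, MPI_COMM_WORLD);

  char read_buffer[BUFFERSIZE];
  for (int j = 0; j < nCurObj; j++) {
    MPI_Recv(read_buffer, BUFFERSIZE, MPI_CHAR, 0,
             TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD, &istatus);
    // parseDumpLine(read_buffer, integrableObjects[j]) would run here.
  }
}

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  if (rank == 0) {
    std::vector<std::string> lines = {"line 0", "line 1", "line 2"};
    for (int node = 1; node < size; node++)
      masterSide(node, lines);
  } else {
    workerSide(3);   // each remote rank expects three records in this toy run
  }

  MPI_Finalize();
  return 0;
}

Run the sketch with at least two ranks (e.g. mpirun -np 3) so there is a remote side for rank 0 to talk to.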
649       int j, myStatus;
650
651       myStatus = 0;
652 <     for (j = 0; j < mpiSim->getNumberProcessors(); j++) {
652 >     for (j = 0; j < mpiSim->getNprocessors(); j++) {
653         MPI_Send( &myStatus, 1, MPI_INT, j,
654                   TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
655       }
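The final hunk only renames the processor-count accessor, but the loop it sits in is a status fan-out from rank 0 to the other ranks. A small sketch of that pattern, using MPI_Comm_size in place of mpiSim->getNprocessors(), an assumed tag value, and skipping the self-send so the toy program cannot stall:

// Illustrative only: rank 0 fans a status flag out to the other ranks.
#include <mpi.h>
#include <iostream>

const int TAKE_THIS_TAG_INT = 0;   // assumed tag value

int main(int argc, char* argv[]) {
  MPI_Init(&argc, &argv);
  int rank, nProcessors;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);   // stand-in for mpiSim->getNprocessors()

  int myStatus = -1;               // sentinel until a real value arrives
  if (rank == 0) {
    myStatus = 0;                  // mirrors the myStatus = 0 sent in the hunk above
    for (int j = 1; j < nProcessors; j++) {   // original loops from 0; self-send skipped here
      MPI_Send(&myStatus, 1, MPI_INT, j, TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
    }
  } else {
    MPI_Status istatus;
    MPI_Recv(&myStatus, 1, MPI_INT, 0, TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);
  }
  std::cout << "rank " << rank << " sees status " << myStatus << std::endl;

  MPI_Finalize();
  return 0;
}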