302 |
|
void DumpWriter::writeFrame(std::ostream& os) { |
303 |
|
|
304 |
|
#ifdef IS_MPI |
305 |
< |
MPI_Status istatus; |
305 |
> |
MPI::Status istatus; |
306 |
|
#endif |
307 |
|
|
308 |
|
Molecule* mol; |
331 |
|
|
332 |
|
if (doSiteData_) { |
333 |
|
os << " <SiteData>\n"; |
334 |
< |
for (mol = info_->beginMolecule(mi); mol != NULL; mol = info_->nextMolecule(mi)) { |
334 |
> |
for (mol = info_->beginMolecule(mi); mol != NULL; |
335 |
> |
mol = info_->nextMolecule(mi)) { |
336 |
|
|
337 |
|
for (sd = mol->beginIntegrableObject(ii); sd != NULL; |
338 |
|
sd = mol->nextIntegrableObject(ii)) { |
359 |
|
|
360 |
|
os.flush(); |
361 |
|
#else |
361 |
- |
//every node prepares the dump lines for integrable objects belong to itself |
362 |
- |
std::string buffer; |
363 |
- |
for (mol = info_->beginMolecule(mi); mol != NULL; mol = info_->nextMolecule(mi)) { |
362 |
|
|
363 |
+ |
const int masterNode = 0; |
364 |
+ |
int worldRank = MPI::COMM_WORLD.Get_rank(); |
365 |
+ |
int nProc = MPI::COMM_WORLD.Get_size(); |
366 |
|
|
367 |
+ |
if (worldRank == masterNode) { |
368 |
+ |
os << " <Snapshot>\n"; |
369 |
+ |
writeFrameProperties(os, |
370 |
+ |
info_->getSnapshotManager()->getCurrentSnapshot()); |
371 |
+ |
os << " <StuntDoubles>\n"; |
372 |
+ |
} |
373 |
+ |
|
374 |
+ |
//every node prepares the dump lines for integrable objects belong to itself |
375 |
+ |
std::string buffer; |
376 |
+ |
for (mol = info_->beginMolecule(mi); mol != NULL; |
377 |
+ |
mol = info_->nextMolecule(mi)) { |
378 |
|
for (sd = mol->beginIntegrableObject(ii); sd != NULL; |
379 |
|
sd = mol->nextIntegrableObject(ii)) { |
380 |
< |
buffer += prepareDumpLine(sd); |
380 |
> |
buffer += prepareDumpLine(sd); |
381 |
|
} |
382 |
|
} |
383 |
|
|
372 |
- |
const int masterNode = 0; |
373 |
- |
int nProc; |
374 |
- |
MPI_Comm_size(MPI_COMM_WORLD, &nProc); |
384 |
|
if (worldRank == masterNode) { |
376 |
- |
os << " <Snapshot>\n"; |
377 |
- |
writeFrameProperties(os, info_->getSnapshotManager()->getCurrentSnapshot()); |
378 |
- |
os << " <StuntDoubles>\n"; |
379 |
- |
|
385 |
|
os << buffer; |
386 |
< |
|
386 |
> |
|
387 |
|
for (int i = 1; i < nProc; ++i) { |
388 |
+ |
// tell processor i to start sending us data: |
389 |
+ |
MPI::COMM_WORLD.Bcast(&i, 1, MPI::INT, masterNode); |
390 |
|
|
391 |
|
// receive the length of the string buffer that was |
392 |
< |
// prepared by processor i |
386 |
< |
|
387 |
< |
MPI_Bcast(&i, 1, MPI_INT,masterNode,MPI_COMM_WORLD); |
392 |
> |
// prepared by processor i: |
393 |
|
int recvLength; |
394 |
< |
MPI_Recv(&recvLength, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &istatus); |
394 |
> |
MPI::COMM_WORLD.Recv(&recvLength, 1, MPI::INT, i, MPI::ANY_TAG, |
395 |
> |
istatus); |
396 |
> |
|
397 |
> |
// create a buffer to receive the data |
398 |
|
char* recvBuffer = new char[recvLength]; |
399 |
|
if (recvBuffer == NULL) { |
400 |
|
} else { |
401 |
< |
MPI_Recv(recvBuffer, recvLength, MPI_CHAR, i, 0, MPI_COMM_WORLD, &istatus); |
401 |
> |
// receive the data: |
402 |
> |
MPI::COMM_WORLD.Recv(recvBuffer, recvLength, MPI::CHAR, i, |
403 |
> |
MPI::ANY_TAG, istatus); |
404 |
> |
// send it to the file: |
405 |
|
os << recvBuffer; |
406 |
+ |
// get rid of the receive buffer: |
407 |
|
delete [] recvBuffer; |
408 |
|
} |
409 |
|
} |
398 |
- |
os << " </StuntDoubles>\n"; |
399 |
- |
|
400 |
- |
os << " </Snapshot>\n"; |
401 |
- |
os.flush(); |
410 |
|
} else { |
411 |
|
int sendBufferLength = buffer.size() + 1; |
412 |
|
int myturn = 0; |
413 |
|
for (int i = 1; i < nProc; ++i){ |
414 |
< |
MPI_Bcast(&myturn,1, MPI_INT,masterNode,MPI_COMM_WORLD); |
414 |
> |
// wait for the master node to call our number: |
415 |
> |
MPI::COMM_WORLD.Bcast(&myturn, 1, MPI::INT, masterNode); |
416 |
|
if (myturn == worldRank){ |
417 |
< |
MPI_Send(&sendBufferLength, 1, MPI_INT, masterNode, 0, MPI_COMM_WORLD); |
418 |
< |
MPI_Send((void *)buffer.c_str(), sendBufferLength, MPI_CHAR, masterNode, 0, MPI_COMM_WORLD); |
417 |
> |
// send the length of our buffer: |
418 |
> |
MPI::COMM_WORLD.Send(&sendBufferLength, 1, MPI::INT, masterNode, 0); |
419 |
> |
|
420 |
> |
// send our buffer: |
421 |
> |
MPI::COMM_WORLD.Send((void *)buffer.c_str(), sendBufferLength, |
422 |
> |
MPI::CHAR, masterNode, 0); |
423 |
|
} |
424 |
|
} |
425 |
|
} |
426 |
+ |
|
427 |
+ |
if (worldRank == masterNode) { |
428 |
+ |
os << " </StuntDoubles>\n"; |
429 |
+ |
} |
430 |
|
|
431 |
< |
#endif // is_mpi |
431 |
> |
if (doSiteData_) { |
432 |
> |
if (worldRank == masterNode) { |
433 |
> |
os << " <SiteData>\n"; |
434 |
> |
} |
435 |
> |
buffer.clear(); |
436 |
> |
for (mol = info_->beginMolecule(mi); mol != NULL; |
437 |
> |
mol = info_->nextMolecule(mi)) { |
438 |
> |
|
439 |
> |
for (sd = mol->beginIntegrableObject(ii); sd != NULL; |
440 |
> |
sd = mol->nextIntegrableObject(ii)) { |
441 |
> |
|
442 |
> |
int ioIndex = sd->getGlobalIntegrableObjectIndex(); |
443 |
> |
// do one for the IO itself |
444 |
> |
buffer += prepareSiteLine(sd, ioIndex, 0); |
445 |
|
|
446 |
+ |
if (sd->isRigidBody()) { |
447 |
+ |
|
448 |
+ |
RigidBody* rb = static_cast<RigidBody*>(sd); |
449 |
+ |
int siteIndex = 0; |
450 |
+ |
for (atom = rb->beginAtom(ai); atom != NULL; |
451 |
+ |
atom = rb->nextAtom(ai)) { |
452 |
+ |
buffer += prepareSiteLine(atom, ioIndex, siteIndex); |
453 |
+ |
siteIndex++; |
454 |
+ |
} |
455 |
+ |
} |
456 |
+ |
} |
457 |
+ |
} |
458 |
+ |
|
459 |
+ |
if (worldRank == masterNode) { |
460 |
+ |
os << buffer; |
461 |
+ |
|
462 |
+ |
for (int i = 1; i < nProc; ++i) { |
463 |
+ |
|
464 |
+ |
// tell processor i to start sending us data: |
465 |
+ |
MPI::COMM_WORLD.Bcast(&i, 1, MPI::INT, masterNode); |
466 |
+ |
|
467 |
+ |
// receive the length of the string buffer that was |
468 |
+ |
// prepared by processor i: |
469 |
+ |
int recvLength; |
470 |
+ |
MPI::COMM_WORLD.Recv(&recvLength, 1, MPI::INT, i, MPI::ANY_TAG, |
471 |
+ |
istatus); |
472 |
+ |
|
473 |
+ |
// create a buffer to receive the data |
474 |
+ |
char* recvBuffer = new char[recvLength]; |
475 |
+ |
if (recvBuffer == NULL) { |
476 |
+ |
} else { |
477 |
+ |
// receive the data: |
478 |
+ |
MPI::COMM_WORLD.Recv(recvBuffer, recvLength, MPI::CHAR, i, |
479 |
+ |
MPI::ANY_TAG, istatus); |
480 |
+ |
// send it to the file: |
481 |
+ |
os << recvBuffer; |
482 |
+ |
// get rid of the receive buffer: |
483 |
+ |
delete [] recvBuffer; |
484 |
+ |
} |
485 |
+ |
} |
486 |
+ |
} else { |
487 |
+ |
int sendBufferLength = buffer.size() + 1; |
488 |
+ |
int myturn = 0; |
489 |
+ |
for (int i = 1; i < nProc; ++i){ |
490 |
+ |
// wait for the master node to call our number: |
491 |
+ |
MPI::COMM_WORLD.Bcast(&myturn, 1, MPI::INT, masterNode); |
492 |
+ |
if (myturn == worldRank){ |
493 |
+ |
// send the length of our buffer: |
494 |
+ |
MPI::COMM_WORLD.Send(&sendBufferLength, 1, MPI::INT, masterNode, 0); |
495 |
+ |
// send our buffer: |
496 |
+ |
MPI::COMM_WORLD.Send((void *)buffer.c_str(), sendBufferLength, |
497 |
+ |
MPI::CHAR, masterNode, 0); |
498 |
+ |
} |
499 |
+ |
} |
500 |
+ |
} |
501 |
+ |
|
502 |
+ |
if (worldRank == masterNode) { |
503 |
+ |
os << " </SiteData>\n"; |
504 |
+ |
} |
505 |
+ |
} |
506 |
+ |
|
507 |
+ |
if (worldRank == masterNode) { |
508 |
+ |
os << " </Snapshot>\n"; |
509 |
+ |
os.flush(); |
510 |
+ |
} |
511 |
+ |
|
512 |
+ |
#endif // is_mpi |
513 |
+ |
|
514 |
|
} |
515 |
|
|
516 |
|
std::string DumpWriter::prepareDumpLine(StuntDouble* sd) { |