  5   #ifdef IS_MPI
  6   #include <mpi.h>
  7   #include "mpiSimulation.hpp"
  8 + #define TAKE_THIS_TAG 0
  9   #endif //is_mpi
 10
 11   #include "ReadWrite.hpp"
 42
 43   #ifdef IS_MPI
 44     }
 45 +
 46 +   sprintf( checkPointMsg,
 47 +            "Successfully opened output file for dumping.\n");
 48 +   MPIcheckPoint();
 49   #endif // is_mpi
 50   }
 51
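The added checkpoint after opening the output file gives every node a chance to agree that the node-local I/O succeeded before anyone starts dumping. The real MPIcheckPoint() lives elsewhere in the tree; as a rough, hypothetical stand-in for what a checkpoint of this style does:

// Hypothetical stand-in for an MPIcheckPoint()-style routine (not the
// OOPSE implementation): every rank contributes a local error flag, and
// all ranks learn whether any rank failed before the program moves on.
#include <mpi.h>
#include <cstdio>

void checkPoint( int localError, const char* step ){
  int globalError;

  // Logical OR across ranks: zero everywhere means everyone succeeded.
  MPI_Allreduce( &localError, &globalError, 1, MPI_INT, MPI_LOR,
                 MPI_COMM_WORLD );

  if( globalError ){
    fprintf( stderr, "checkpoint failed during: %s\n", step );
    MPI_Abort( MPI_COMM_WORLD, 1 );
  }
}

int main( int argc, char* argv[] ){
  MPI_Init( &argc, &argv );

  // A node-local operation whose success the whole job must agree on.
  FILE* out = fopen( "dump.out", "w" );
  checkPoint( out == NULL, "opening output file for dumping" );

  if( out != NULL ) fclose( out );
  MPI_Finalize();
  return 0;
}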
 65   void DumpWriter::writeDump( double currentTime ){
 66
 67     const int BUFFERSIZE = 2000;
 68 <   char tempBuffer[500];
 68 >   char tempBuffer[BUFFERSIZE];
 69     char writeLine[BUFFERSIZE];
 70
 71     int i;
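The tempBuffer fix matters because, judging from the excerpt, each atom line is built with sprintf into tempBuffer and then appended to writeLine; with tempBuffer at 500 chars and writeLine at 2000, tempBuffer is the one that overflows first if a line runs long. Sizing both to BUFFERSIZE removes the mismatch. A sketch of the same two-buffer pattern with explicit truncation (snprintf/strncat; illustrative only, not the OOPSE code):

// Sketch only: the two-buffer pattern from writeDump(), with snprintf
// and strncat so an oversized atom line truncates instead of overflowing.
#include <cstdio>
#include <cstring>

int main(){
  const int BUFFERSIZE = 2000;
  char tempBuffer[BUFFERSIZE];
  char writeLine[BUFFERSIZE];

  // Build one field in tempBuffer, then append it to the output line.
  snprintf( tempBuffer, sizeof(tempBuffer), "%s\t%lf\t%lf\t%lf\t",
            "H", 0.0, 0.0, 0.0 );
  writeLine[0] = '\0';
  strncat( writeLine, tempBuffer,
           sizeof(writeLine) - strlen(writeLine) - 1 );

  printf( "%s\n", writeLine );
  return 0;
}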
180       }
181       outFile.flush();
182     }
183 +
184 +   sprintf( checkPointMsg,
185 +            "Successfully wrote node 0's dump configuration.\n");
186 +   MPIcheckPoint();
187
188     for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
189          procIndex++){
190
191       if( worldRank == 0 ){
192 <
192 >
193         mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
194 <                         MPI_ANY_TAG,MPI_COMM_WORLD,istatus);
195 <
194 >                         TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
195 >
196         mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
197 <                         MPI_ANY_TAG,MPI_COMM_WORLD, istatus);
198 <
197 >                         TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
198 >
199         // Make sure where node 0 is writing to matches where the
200         // receiving node expects it to be.
201 <
201 >
202         if (masterIndex != nodeAtomsStart){
203           sendError = 1;
204 <         mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
204 >         mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
205                             MPI_COMM_WORLD);
206           sprintf(painCave.errMsg,
207                   "DumpWriter error: atoms start index (%d) for "
210         painCave.isFatal = 1;
211         simError();
212       }
213 <
213 >
214       sendError = 0;
215 <     mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
215 >     mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
216                         MPI_COMM_WORLD);
217 <
217 >
218       // receive the node's writeLines
219 <
219 >
220       for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
221 <
221 >
222         mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
223 <                         MPI_ANY_TAG,MPI_COMM_WORLD,istatus );
224 <
223 >                         TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
224 >
225         outFile << writeLine;
226         masterIndex++;
227       }
232       nodeAtomsStart = mpiSim->getMyAtomStart();
233       nodeAtomsEnd = mpiSim->getMyAtomEnd();
234
235 <     mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,MPI_ANY_TAG,
235 >     fprintf( stderr,
236 >              "node %d: myatomStart-> %d; myatomEnd-> %d\n",
237 >              worldRank, nodeAtomsStart, nodeAtomsEnd );
238 >
239 >     mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
240                         MPI_COMM_WORLD);
241 <     mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,MPI_ANY_TAG,
241 >     mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
242                         MPI_COMM_WORLD);
243
244 <     mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,MPI_ANY_TAG,
244 >     fprintf( stderr, "node %d: sent off the start and end\n", worldRank );
245 >
246 >     sendError = -1;
247 >     mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
248                         MPI_COMM_WORLD, istatus);
249 +
250 +     fprintf( stderr, "node %d: value of sendError is %d\n", worldRank, sendError );
251 +
252       if (sendError) MPIcheckPoint();
253
254       // send current node's configuration line by line.
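Taken together, the exchange above is a fixed-tag rendezvous: each worker reports its atom range, node 0 checks that range against masterIndex (its running count of lines already written) and answers with an error flag before any configuration lines move. A standalone sketch of just that handshake, with a hypothetical even decomposition standing in for mpiSim->getMyAtomStart()/getMyAtomEnd() (simplified; the line transfer itself is omitted):

// Sketch of the range/acknowledge handshake, not OOPSE code.
#include <mpi.h>
#include <cstdio>

#define TAKE_THIS_TAG 0

int main( int argc, char* argv[] ){
  int rank, nProcs, masterIndex;
  MPI_Status status;

  MPI_Init( &argc, &argv );
  MPI_Comm_rank( MPI_COMM_WORLD, &rank );
  MPI_Comm_size( MPI_COMM_WORLD, &nProcs );

  const int atomsPerNode = 10;            // hypothetical decomposition

  if( rank == 0 ){
    masterIndex = atomsPerNode;           // node 0's own atoms come first
    for( int proc = 1; proc < nProcs; proc++ ){
      int start, end, sendError;
      MPI_Recv( &start, 1, MPI_INT, proc, TAKE_THIS_TAG, MPI_COMM_WORLD,
                &status );
      MPI_Recv( &end,   1, MPI_INT, proc, TAKE_THIS_TAG, MPI_COMM_WORLD,
                &status );

      // Acknowledge: nonzero tells the worker the indices don't line up.
      sendError = ( masterIndex != start );
      MPI_Send( &sendError, 1, MPI_INT, proc, TAKE_THIS_TAG,
                MPI_COMM_WORLD );

      masterIndex = end + 1;              // advance past this node's block
    }
  }
  else{
    int start = rank * atomsPerNode;
    int end   = start + atomsPerNode - 1;
    int sendError;
    MPI_Send( &start, 1, MPI_INT, 0, TAKE_THIS_TAG, MPI_COMM_WORLD );
    MPI_Send( &end,   1, MPI_INT, 0, TAKE_THIS_TAG, MPI_COMM_WORLD );
    MPI_Recv( &sendError, 1, MPI_INT, 0, TAKE_THIS_TAG, MPI_COMM_WORLD,
              &status );
    if( sendError ) fprintf( stderr, "node %d: index mismatch\n", rank );
  }

  MPI_Finalize();
  return 0;
}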
285         else
286           strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
287
288 <       mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,MPI_ANY_TAG,
288 >       fprintf( stderr,
289 >                "node %d: I'm sending the line:\n->%s\n", worldRank, writeLine );
290 >
291 >       mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
292                           MPI_COMM_WORLD);
293       }
294     }
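One thing the diff leaves untouched: each line is sent as the full BUFFERSIZE characters, even though the useful payload ends at the terminator. Not a correctness issue, but if bandwidth mattered, the sender could ship strlen()+1 bytes and the receiver could size the read with MPI_Probe/MPI_Get_count. A hypothetical sketch of that alternative (run with 2 ranks):

// Aside, sketched (not in the diff): send only the bytes that carry
// data, and let the receiver discover the actual message length.
#include <mpi.h>
#include <cstring>
#include <cstdio>

#define TAKE_THIS_TAG 0

int main( int argc, char* argv[] ){
  int rank;
  MPI_Status status;

  MPI_Init( &argc, &argv );
  MPI_Comm_rank( MPI_COMM_WORLD, &rank );

  if( rank == 1 ){
    char writeLine[2000] = "H\t0.0\t0.0\t0.0\n";
    // Ship only the payload plus the terminating '\0'.
    MPI_Send( writeLine, (int)strlen(writeLine) + 1, MPI_CHAR, 0,
              TAKE_THIS_TAG, MPI_COMM_WORLD );
  }
  else if( rank == 0 ){
    int count;
    MPI_Probe( 1, TAKE_THIS_TAG, MPI_COMM_WORLD, &status );
    MPI_Get_count( &status, MPI_CHAR, &count );  // actual message length

    char writeLine[2000];
    MPI_Recv( writeLine, count, MPI_CHAR, 1, TAKE_THIS_TAG,
              MPI_COMM_WORLD, &status );
    printf( "%s", writeLine );
  }

  MPI_Finalize();
  return 0;
}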
413              << entry_plug->box_z << "\n";
414
415     masterIndex = 0;
416 +
417 +   std::cerr << "about to write node 0 atoms. nAtoms = " << nAtoms << "\n";
418 +
419     for( i=0; i<nAtoms; i++ ){
420
421       sprintf( tempBuffer,
448       else
449         strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
450
451 <     outFile << writeLine;
451 >     finalOut << writeLine;
452       masterIndex++;
453     }
454     finalOut.flush();
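The final-configuration path from here on repeats the dump path almost line for line, with finalOut in place of outFile (the line-451 fix corrects one spot that still wrote to outFile). A possible future cleanup, sketched hypothetically, is to pass the destination stream in rather than duplicating the loop:

// Design note, hypothetical refactor (not in the diff): the two paths
// differ only in the stream they write to, so the per-atom loop could
// take the destination as a parameter.
#include <iostream>
#include <cstdio>

// Write nAtoms placeholder lines to whichever stream the caller hands in.
static void writeConfig( std::ostream& out, int nAtoms ){
  char writeLine[2000];
  for( int i = 0; i < nAtoms; i++ ){
    snprintf( writeLine, sizeof(writeLine),
              "X\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
    out << writeLine;
  }
}

int main(){
  writeConfig( std::cout, 3 );   // stands in for outFile or finalOut
  return 0;
}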
460     if( worldRank == 0 ){
461
462       mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
463 <                       MPI_ANY_TAG,MPI_COMM_WORLD,istatus);
463 >                       TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
464
465       mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
466 <                       MPI_ANY_TAG,MPI_COMM_WORLD, istatus);
466 >                       TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
467
468       // Make sure where node 0 is writing to matches where the
469       // receiving node expects it to be.
470
471       if (masterIndex != nodeAtomsStart){
472         sendError = 1;
473 <       mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
473 >       mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
474                           MPI_COMM_WORLD);
475         sprintf(painCave.errMsg,
476                 "DumpWriter error: atoms start index (%d) for "
481       }
482
483       sendError = 0;
484 <     mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
484 >     mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
485                         MPI_COMM_WORLD);
486
487       // receive the node's writeLines
489       for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
490
491         mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
492 <                         MPI_ANY_TAG,MPI_COMM_WORLD,istatus );
492 >                         TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
493
494         finalOut << writeLine;
495         masterIndex++;
503       nodeAtomsStart = mpiSim->getMyAtomStart();
504       nodeAtomsEnd = mpiSim->getMyAtomEnd();
505
506 <     mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,MPI_ANY_TAG,
506 >     mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
507                         MPI_COMM_WORLD);
508 <     mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,MPI_ANY_TAG,
508 >     mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
509                         MPI_COMM_WORLD);
510
511 <     mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,MPI_ANY_TAG,
511 >     mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
512                         MPI_COMM_WORLD, istatus);
513       if (sendError) MPIcheckPoint();
514
546         else
547           strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
548
549 <       mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,MPI_ANY_TAG,
549 >       mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
550                           MPI_COMM_WORLD);
551       }
552     }