#include <cstdio>
#include <cstring>
#include <iostream>
#include <fstream>

#ifdef IS_MPI
#include <mpi.h>
#include "mpiSimulation.hpp"
#define TAKE_THIS_TAG 0
#endif // is_mpi

#include "ReadWrite.hpp"
#include "simError.h"

using namespace std;

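// The constructor opens the dump file named by entry_plug->sampleName.
// Under MPI only the master node (rank 0) owns the output stream; all
// nodes then synchronize on a checkpoint.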
DumpWriter::DumpWriter( SimInfo* the_entry_plug ){

  entry_plug = the_entry_plug;

#ifdef IS_MPI
  if(worldRank == 0 ){
#endif // is_mpi

    strcpy( outName, entry_plug->sampleName );

    outFile.open(outName, ios::out | ios::trunc );

    if( !outFile ){

      sprintf( painCave.errMsg,
               "Could not open \"%s\" for dump output.\n",
               outName);
      painCave.isFatal = 1;
      simError();
    }

    //outFile.setf( ios::scientific );

#ifdef IS_MPI
  }

  sprintf( checkPointMsg,
           "Successfully opened output file for dumping.\n");
  MPIcheckPoint();
#endif // is_mpi
}

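// The destructor closes the dump file (again, rank 0 only under MPI).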
DumpWriter::~DumpWriter( ){

#ifdef IS_MPI
  if(worldRank == 0 ){
#endif // is_mpi

    outFile.close();

#ifdef IS_MPI
  }
#endif // is_mpi
}

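// writeDump appends one frame to the dump file. Frame layout:
//   line 1: number of atoms
//   line 2: time, box_x, box_y, box_z (tab separated)
//   then one tab-separated line per atom: type, position (x, y, z),
//   velocity (vx, vy, vz), and either the orientation quaternion plus
//   angular momentum (directional atoms) or seven 0.0 placeholders.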
void DumpWriter::writeDump( double currentTime ){

  const int BUFFERSIZE = 2000;
  char tempBuffer[BUFFERSIZE];
  char writeLine[BUFFERSIZE];

  int i;
  double q[4];
  DirectionalAtom* dAtom;
  int nAtoms = entry_plug->n_atoms;
  Atom** atoms = entry_plug->atoms;

#ifndef IS_MPI

  outFile << nAtoms << "\n";

  outFile << currentTime << "\t"
          << entry_plug->box_x << "\t"
          << entry_plug->box_y << "\t"
          << entry_plug->box_z << "\n";

  for( i=0; i<nAtoms; i++ ){

    sprintf( tempBuffer,
             "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
             atoms[i]->getType(),
             atoms[i]->getX(),
             atoms[i]->getY(),
             atoms[i]->getZ(),
             atoms[i]->get_vx(),
             atoms[i]->get_vy(),
             atoms[i]->get_vz());
    strcpy( writeLine, tempBuffer );

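    // Directional atoms append their orientation quaternion and
    // angular momentum; all other atoms get zero placeholders so
    // every line carries the same number of columns.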
    if( atoms[i]->isDirectional() ){

      dAtom = (DirectionalAtom *)atoms[i];
      dAtom->getQ( q );

      sprintf( tempBuffer,
               "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
               q[0],
               q[1],
               q[2],
               q[3],
               dAtom->getJx(),
               dAtom->getJy(),
               dAtom->getJz());
      strcat( writeLine, tempBuffer );
    }
    else
      strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

    outFile << writeLine;
  }
  outFile.flush();

#else // is_mpi

  int masterIndex;
  int nodeAtomsStart;
  int nodeAtomsEnd;
  int mpiErr;
  int sendError;
  int procIndex;

  MPI_Status istatus;

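  // Gather protocol: the master (rank 0) writes its own atoms first,
  // then visits every other node in turn. Each node reports its global
  // atom index range; the master checks that range against its running
  // write index, acknowledges with sendError, and then receives the
  // pre-formatted atom lines one at a time.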
  // write out header and node 0's coordinates

  if( worldRank == 0 ){
    outFile << mpiSim->getTotAtoms() << "\n";

    outFile << currentTime << "\t"
            << entry_plug->box_x << "\t"
            << entry_plug->box_y << "\t"
            << entry_plug->box_z << "\n";

    masterIndex = 0;
    for( i=0; i<nAtoms; i++ ){

      sprintf( tempBuffer,
               "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
               atoms[i]->getType(),
               atoms[i]->getX(),
               atoms[i]->getY(),
               atoms[i]->getZ(),
               atoms[i]->get_vx(),
               atoms[i]->get_vy(),
               atoms[i]->get_vz());
      strcpy( writeLine, tempBuffer );

      if( atoms[i]->isDirectional() ){

        dAtom = (DirectionalAtom *)atoms[i];
        dAtom->getQ( q );

        sprintf( tempBuffer,
                 "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
                 q[0],
                 q[1],
                 q[2],
                 q[3],
                 dAtom->getJx(),
                 dAtom->getJy(),
                 dAtom->getJz());
        strcat( writeLine, tempBuffer );
      }
      else
        strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

      outFile << writeLine;
      masterIndex++;
    }
    outFile.flush();
  }

  sprintf( checkPointMsg,
           "Successfully wrote node 0's dump configuration.\n");
  MPIcheckPoint();

  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
       procIndex++){

    if( worldRank == 0 ){

      mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
                        TAKE_THIS_TAG,MPI_COMM_WORLD,&istatus);

      mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
                        TAKE_THIS_TAG,MPI_COMM_WORLD,&istatus);

      // Make sure the index node 0 is writing at matches where the
      // sending node expects it to be.

      if (masterIndex != nodeAtomsStart){
        sendError = 1;
        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
                          MPI_COMM_WORLD);
        sprintf(painCave.errMsg,
                "DumpWriter error: atoms start index (%d) for "
                "node %d not equal to master index (%d)",
                nodeAtomsStart,procIndex,masterIndex );
        painCave.isFatal = 1;
        simError();
      }

      sendError = 0;
      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
                        MPI_COMM_WORLD);

      // receive the node's writeLines

      for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){

        mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
                          TAKE_THIS_TAG,MPI_COMM_WORLD,&istatus);

        outFile << writeLine;
        masterIndex++;
      }
    }

    else if( worldRank == procIndex ){

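      // This node's turn: report our global atom range to the master
      // and wait for the acknowledgement before streaming our lines.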
      nodeAtomsStart = mpiSim->getMyAtomStart();
      nodeAtomsEnd = mpiSim->getMyAtomEnd();

      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
                        MPI_COMM_WORLD);
      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
                        MPI_COMM_WORLD);

      sendError = -1;
      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
                        MPI_COMM_WORLD, &istatus);

      if (sendError) MPIcheckPoint();

      // send current node's configuration line by line.

      for( i=0; i<nAtoms; i++ ){

        sprintf( tempBuffer,
                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
                 atoms[i]->getType(),
                 atoms[i]->getX(),
                 atoms[i]->getY(),
                 atoms[i]->getZ(),
                 atoms[i]->get_vx(),
                 atoms[i]->get_vy(),
                 atoms[i]->get_vz());
        strcpy( writeLine, tempBuffer );

        if( atoms[i]->isDirectional() ){

          dAtom = (DirectionalAtom *)atoms[i];
          dAtom->getQ( q );

          sprintf( tempBuffer,
                   "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
                   q[0],
                   q[1],
                   q[2],
                   q[3],
                   dAtom->getJx(),
                   dAtom->getJy(),
                   dAtom->getJz());
          strcat( writeLine, tempBuffer );
        }
        else
          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
                          MPI_COMM_WORLD);
      }
    }

    sprintf(checkPointMsg,"Node %d sent dump configuration.",
            procIndex);
    MPIcheckPoint();
  }

#endif // is_mpi
}

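// writeFinal writes the last configuration to entry_plug->finalName.
// The format matches writeDump, except that the header carries only
// the box dimensions and no time stamp.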
void DumpWriter::writeFinal(){

  const int BUFFERSIZE = 2000;
  char tempBuffer[BUFFERSIZE];
  char writeLine[BUFFERSIZE];

  char finalName[500];

  int i;
  double q[4];
  DirectionalAtom* dAtom;
  int nAtoms = entry_plug->n_atoms;
  Atom** atoms = entry_plug->atoms;

  ofstream finalOut;

#ifdef IS_MPI
  if(worldRank == 0 ){
#endif // is_mpi

    strcpy( finalName, entry_plug->finalName );

    finalOut.open( finalName, ios::out | ios::trunc );
    if( !finalOut ){
      sprintf( painCave.errMsg,
               "Could not open \"%s\" for final dump output.\n",
               finalName );
      painCave.isFatal = 1;
      simError();
    }

    // finalOut.setf( ios::scientific );

#ifdef IS_MPI
  }

  sprintf(checkPointMsg,"Opened file for final configuration\n");
  MPIcheckPoint();

#endif // is_mpi

#ifndef IS_MPI

  finalOut << nAtoms << "\n";

  finalOut << entry_plug->box_x << "\t"
           << entry_plug->box_y << "\t"
           << entry_plug->box_z << "\n";

  for( i=0; i<nAtoms; i++ ){

    sprintf( tempBuffer,
             "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
             atoms[i]->getType(),
             atoms[i]->getX(),
             atoms[i]->getY(),
             atoms[i]->getZ(),
             atoms[i]->get_vx(),
             atoms[i]->get_vy(),
             atoms[i]->get_vz());
    strcpy( writeLine, tempBuffer );

    if( atoms[i]->isDirectional() ){

      dAtom = (DirectionalAtom *)atoms[i];
      dAtom->getQ( q );

      sprintf( tempBuffer,
               "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
               q[0],
               q[1],
               q[2],
               q[3],
               dAtom->getJx(),
               dAtom->getJy(),
               dAtom->getJz());
      strcat( writeLine, tempBuffer );
    }
    else
      strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

    finalOut << writeLine;
  }
  finalOut.flush();

#else // is_mpi

  int masterIndex;
  int nodeAtomsStart;
  int nodeAtomsEnd;
  int mpiErr;
  int sendError;
  int procIndex;

  MPI_Status istatus;

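  // The gather protocol below mirrors writeDump(): slaves report their
  // atom ranges, the master validates and acknowledges each range, then
  // collects the formatted lines.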
  // write out header and node 0's coordinates

  if( worldRank == 0 ){
    finalOut << mpiSim->getTotAtoms() << "\n";

    finalOut << entry_plug->box_x << "\t"
             << entry_plug->box_y << "\t"
             << entry_plug->box_z << "\n";

    masterIndex = 0;

    for( i=0; i<nAtoms; i++ ){

      sprintf( tempBuffer,
               "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
               atoms[i]->getType(),
               atoms[i]->getX(),
               atoms[i]->getY(),
               atoms[i]->getZ(),
               atoms[i]->get_vx(),
               atoms[i]->get_vy(),
               atoms[i]->get_vz());
      strcpy( writeLine, tempBuffer );

      if( atoms[i]->isDirectional() ){

        dAtom = (DirectionalAtom *)atoms[i];
        dAtom->getQ( q );

        sprintf( tempBuffer,
                 "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
                 q[0],
                 q[1],
                 q[2],
                 q[3],
                 dAtom->getJx(),
                 dAtom->getJy(),
                 dAtom->getJz());
        strcat( writeLine, tempBuffer );
      }
      else
        strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

      finalOut << writeLine;
      masterIndex++;
    }
    finalOut.flush();
  }

  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
       procIndex++){

    if( worldRank == 0 ){

      mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
                        TAKE_THIS_TAG,MPI_COMM_WORLD,&istatus);

      mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
                        TAKE_THIS_TAG,MPI_COMM_WORLD,&istatus);

      // Make sure the index node 0 is writing at matches where the
      // sending node expects it to be.

      if (masterIndex != nodeAtomsStart){
        sendError = 1;
        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
                          MPI_COMM_WORLD);
        sprintf(painCave.errMsg,
                "DumpWriter error: atoms start index (%d) for "
                "node %d not equal to master index (%d)",
                nodeAtomsStart,procIndex,masterIndex );
        painCave.isFatal = 1;
        simError();
      }

      sendError = 0;
      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
                        MPI_COMM_WORLD);

      // receive the node's writeLines

      for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){

        mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
                          TAKE_THIS_TAG,MPI_COMM_WORLD,&istatus);

        finalOut << writeLine;
        masterIndex++;
      }

      finalOut.flush();
    }

    else if( worldRank == procIndex ){

      nodeAtomsStart = mpiSim->getMyAtomStart();
      nodeAtomsEnd = mpiSim->getMyAtomEnd();

      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
                        MPI_COMM_WORLD);
      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
                        MPI_COMM_WORLD);

      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
                        MPI_COMM_WORLD, &istatus);
      if (sendError) MPIcheckPoint();

      // send current node's configuration line by line.

      for( i=0; i<nAtoms; i++ ){

        sprintf( tempBuffer,
                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
                 atoms[i]->getType(),
                 atoms[i]->getX(),
                 atoms[i]->getY(),
                 atoms[i]->getZ(),
                 atoms[i]->get_vx(),
                 atoms[i]->get_vy(),
                 atoms[i]->get_vz());
        strcpy( writeLine, tempBuffer );

        if( atoms[i]->isDirectional() ){

          dAtom = (DirectionalAtom *)atoms[i];
          dAtom->getQ( q );

          sprintf( tempBuffer,
                   "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
                   q[0],
                   q[1],
                   q[2],
                   q[3],
                   dAtom->getJx(),
                   dAtom->getJy(),
                   dAtom->getJz());
          strcat( writeLine, tempBuffer );
        }
        else
          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
                          MPI_COMM_WORLD);
      }
    }

    sprintf(checkPointMsg,"Node %d sent final configuration.",
            procIndex);
    MPIcheckPoint();
  }

  if( worldRank == 0 ) finalOut.close();

#endif // is_mpi
}