  5     #ifdef IS_MPI
  6     #include <mpi.h>
  7     #include "mpiSimulation.hpp"
  8  +  #define TAKE_THIS_TAG 0
  9     #endif //is_mpi
 10
 11     #include "ReadWrite.hpp"
 17
 18     DumpWriter::DumpWriter( SimInfo* the_entry_plug ){
 19
 20  +  entry_plug = the_entry_plug;
 21  +
 22     #ifdef IS_MPI
 23     if(worldRank == 0 ){
 24     #endif // is_mpi
 25
 26  <  entry_plug = the_entry_plug;
 26  >
 27
 28     strcpy( outName, entry_plug->sampleName );
 29
 27  -  std::cerr << "Opening " << outName << " for dumping.\n";
 28  -
 30     outFile.open(outName, ios::out | ios::trunc );
 31
 32     if( !outFile ){
 42
 43     #ifdef IS_MPI
 44     }
 45  +
 46  +  sprintf( checkPointMsg,
 47  +           "Successfully opened output file for dumping.\n");
 48  +  MPIcheckPoint();
 49     #endif // is_mpi
 50     }
 51
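
The sprintf(checkPointMsg, ...) / MPIcheckPoint() pair added above is the collective error-checking idiom this file uses after every parallel step: each node records a status message and then joins a checkpoint, so a failure anywhere aborts everywhere. MPIcheckPoint() itself is defined elsewhere in the simError machinery; a hypothetical sketch of what such a barrier does (an assumption about its behavior, not its actual source):

    // Every rank reports 0 (ok) or nonzero (failed); if any rank
    // failed, all ranks abort together.
    #include <mpi.h>

    void checkPointSketch( int localError ){
      int worstError = 0;
      MPI_Allreduce(&localError, &worstError, 1, MPI_INT, MPI_MAX,
                    MPI_COMM_WORLD);
      if( worstError ) MPI_Abort(MPI_COMM_WORLD, worstError);
    }
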
 65     void DumpWriter::writeDump( double currentTime ){
 66
 67     const int BUFFERSIZE = 2000;
 68  <  char tempBuffer[500];
 68  >  char tempBuffer[BUFFERSIZE];
 69     char writeLine[BUFFERSIZE];
 70
 71     int i;
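
Growing tempBuffer from 500 chars to BUFFERSIZE matters because the sprintf calls below format an entire atom record (type string plus six doubles, with a second run of fields for directional atoms) into tempBuffer with no length check before copying it into the 2000-char writeLine. Matching the two sizes removes the chance of scribbling past a smaller scratch buffer. A bounds-checked variant would use snprintf (a sketch, not what the file does):

    // snprintf truncates an oversized record instead of writing
    // past the end of the buffer.
    #include <cstdio>

    const int BUFFERSIZE = 2000;

    void formatAtomLine( char* tempBuffer, const char* type,
                         double x, double y, double z,
                         double vx, double vy, double vz ){
      snprintf( tempBuffer, BUFFERSIZE,
                "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
                type, x, y, z, vx, vy, vz );
    }
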
 86
 87     for( i=0; i<nAtoms; i++ ){
 88
 89  +
 90     sprintf( tempBuffer,
 91              "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
 92              atoms[i]->getType(),
136     // write out header and node 0's coordinates
137
138     if( worldRank == 0 ){
139  <  outFile << entry_plug->mpiSim->getTotAtoms() << "\n";
139  >  outFile << mpiSim->getTotAtoms() << "\n";
140
141     outFile << currentTime << "\t"
142             << entry_plug->box_x << "\t"
176     else
177     strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
178
179  <  outfile << writeLine;
179  >  outFile << writeLine;
180     masterIndex++;
181     }
182     outFile.flush();
183     }
184  +
185  +  sprintf( checkPointMsg,
186  +           "Successfully wrote node 0's dump configuration.\n");
187  +  MPIcheckPoint();
188
189  <  for (procIndex = 1; procIndex < entry_plug->mpiSim->getNumberProcessors();
189  >  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
190          procIndex++){
191
192     if( worldRank == 0 ){
193  <
193  >
194     mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
195  <                    MPI_ANY_TAG,MPI_COMM_WORLD,istatus);
196  <
195  >                    TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
196  >
197     mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
198  <                    MPI_ANY_TAG,MPI_COMM_WORLD, istatus);
199  <
198  >                    TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
199  >
200     // Make sure where node 0 is writing to, matches where the
201     // receiving node expects it to be.
202  <
202  >
203     if (masterIndex != nodeAtomsStart){
204     sendError = 1;
205  <  mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
205  >  mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
206                       MPI_COMM_WORLD);
207     sprintf(painCave.errMsg,
208             "DumpWriter error: atoms start index (%d) for "
211     painCave.isFatal = 1;
212     simError();
213     }
214  <
214  >
215     sendError = 0;
216  <  mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
216  >  mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
217                       MPI_COMM_WORLD);
218  <
218  >
219     // receive the node's writeLines
220  <
221  <  for ( i = nodeAtomStart; i <= nodeAtomEnd, i++){
222  <
223  <  mpiErr = MPI_Recv(&read_buffer,BUFFERSIZE,MPI_CHAR,procIndex,
224  <                    MPI_ANY_TAG,MPI_COMM_WORLD,istatus );
225  <
220  >
221  >  for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
222  >
223  >  mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
224  >                    TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
225  >
226     outFile << writeLine;
227     masterIndex++;
228     }
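
Taken together, the hunks above give node 0 a small handshake with each worker: receive the worker's first and last global atom indices, confirm that node 0's running write position (masterIndex) matches the worker's start, acknowledge via sendError, then stream one fixed-size line per atom. Condensed into one self-contained routine (names mirror the code above; painCave error reporting omitted):

    // Node 0's side of the per-processor handshake, condensed from
    // the code above.
    #include <mpi.h>
    #include <fstream>
    #define TAKE_THIS_TAG 0

    const int BUFFERSIZE = 2000;

    void collectFrom( int procIndex, int& masterIndex,
                      std::ofstream& outFile ){
      int nodeAtomsStart, nodeAtomsEnd, sendError;
      char writeLine[BUFFERSIZE];
      MPI_Status istatus;

      MPI_Recv(&nodeAtomsStart, 1, MPI_INT, procIndex, TAKE_THIS_TAG,
               MPI_COMM_WORLD, &istatus);
      MPI_Recv(&nodeAtomsEnd, 1, MPI_INT, procIndex, TAKE_THIS_TAG,
               MPI_COMM_WORLD, &istatus);

      // The dump stays ordered only if node 0's write position lines
      // up with the worker's first atom.
      sendError = (masterIndex != nodeAtomsStart) ? 1 : 0;
      MPI_Send(&sendError, 1, MPI_INT, procIndex, TAKE_THIS_TAG,
               MPI_COMM_WORLD);

      for( int i = nodeAtomsStart; i <= nodeAtomsEnd; i++ ){
        MPI_Recv(writeLine, BUFFERSIZE, MPI_CHAR, procIndex,
                 TAKE_THIS_TAG, MPI_COMM_WORLD, &istatus);
        outFile << writeLine;
        masterIndex++;
      }
    }
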
230
231     else if( worldRank == procIndex ){
232
233  <  nodeAtomStart = entry_plug->mpiSim->getMyAtomStart();
234  <  nodeAtomEnd = entry_plug->mpiSim->getMyAtomEnd();
235  <
236  <  mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,MPI_ANY_TAG,
233  >  nodeAtomsStart = mpiSim->getMyAtomStart();
234  >  nodeAtomsEnd = mpiSim->getMyAtomEnd();
235  >
236  >  mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
237                       MPI_COMM_WORLD);
238  <  mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,MPI_ANY_TAG,
238  >  mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
239                       MPI_COMM_WORLD);
240
241  <  mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,MPI_ANY_TAG,
241  >  sendError = -1;
242  >  mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
243                       MPI_COMM_WORLD, istatus);
233  -  if (sendError) mpiCheckpoint();
244
245  +  if (sendError) MPIcheckPoint();
246  +
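
Presetting sendError to -1 before the receive is a defensive touch: if the MPI_Recv ever failed to fill the flag, the leftover value could no longer read as a false "no error". Any nonzero value then routes the worker into MPIcheckPoint(), where the collective abort happens. The pattern in isolation (an excerpt-style restatement of the three lines above):

    sendError = -1;                    // assume failure until told otherwise
    mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
                      MPI_COMM_WORLD, istatus);
    if (sendError) MPIcheckPoint();    // nonzero (or stale -1) aborts
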
247     // send current node's configuration line by line.
248
249     for( i=0; i<nAtoms; i++ ){
250  <
250  >
251     sprintf( tempBuffer,
252              "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
253              atoms[i]->getType(),
256              atoms[i]->getZ(),
257              atoms[i]->get_vx(),
258              atoms[i]->get_vy(),
259  <           atoms[i]->get_vz());
259  >           atoms[i]->get_vz()); // check here.
260     strcpy( writeLine, tempBuffer );
261
262     if( atoms[i]->isDirectional() ){
278     else
279     strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
280
281  <  mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,MPI_ANY_TAG,
281  >  mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
282                       MPI_COMM_WORLD);
283     }
284     }
285
286     sprintf(checkPointMsg,"Node %d sent dump configuration.",
287             procIndex);
288  <  mpiCheckPoint();
288  >  MPIcheckPoint();
289     }
290
291     #endif // is_mpi
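
One detail worth noting about the transfers above: every line travels as a full BUFFERSIZE-character message even though the formatted text is far shorter. That is legal (the receiver posts a matching BUFFERSIZE buffer, and MPI permits the received message to be shorter than the posted buffer), just wasteful on the wire. A variant that ships only the terminated string would look like this (a sketch, not part of this change):

    // Send strlen+1 bytes; the unchanged BUFFERSIZE receive still
    // matches, since MPI allows a receive buffer larger than the
    // incoming message.
    #include <cstring>

    int msgLen = (int)strlen( writeLine ) + 1;   // include the '\0'
    mpiErr = MPI_Send( writeLine, msgLen, MPI_CHAR, 0, TAKE_THIS_TAG,
                       MPI_COMM_WORLD );
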
330     #ifdef IS_MPI
331     }
332
333  <  sprintf(checkPointMsg,"Opened file for final configuration\n",procIndex);
334  <  mpiCheckPoint();
333  >  sprintf(checkPointMsg,"Opened file for final configuration\n");
334  >  MPIcheckPoint();
335
336     #endif //is_mpi
337
341
342     finalOut << nAtoms << "\n";
343
344  <  finalOut << currentTime << "\t"
345  <           << entry_plug->box_x << "\t"
346  <           << entry_plug->box_y << "\t"
335  <           << entry_plug->box_z << "\n";
344  >  finalOut << entry_plug->box_x << "\t"
345  >           << entry_plug->box_y << "\t"
346  >           << entry_plug->box_z << "\n";
347
348     for( i=0; i<nAtoms; i++ ){
349
396     // write out header and node 0's coordinates
397
398     if( worldRank == 0 ){
399  <  finalOut << entry_plug->mpiSim->getTotAtoms() << "\n";
399  >  finalOut << mpiSim->getTotAtoms() << "\n";
400
401  <  finalOut << currentTime << "\t"
402  <           << entry_plug->box_x << "\t"
403  <           << entry_plug->box_y << "\t"
404  <           << entry_plug->box_z << "\n";
394  <
401  >  finalOut << entry_plug->box_x << "\t"
402  >           << entry_plug->box_y << "\t"
403  >           << entry_plug->box_z << "\n";
404  >
405     masterIndex = 0;
406  +
407     for( i=0; i<nAtoms; i++ ){
408
409     sprintf( tempBuffer,
436     else
437     strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
438
439  <  outfile << writeLine;
439  >  finalOut << writeLine;
440     masterIndex++;
441     }
442     finalOut.flush();
443     }
444
445  <  for (procIndex = 1; procIndex < entry_plug->mpiSim->getNumberProcessors();
445  >  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
446          procIndex++){
447
448     if( worldRank == 0 ){
449
450     mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
451  <                    MPI_ANY_TAG,MPI_COMM_WORLD,istatus);
451  >                    TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
452
453     mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
454  <                    MPI_ANY_TAG,MPI_COMM_WORLD, istatus);
454  >                    TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
455
456     // Make sure where node 0 is writing to, matches where the
457     // receiving node expects it to be.
458
459     if (masterIndex != nodeAtomsStart){
460     sendError = 1;
461  <  mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
461  >  mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
462                       MPI_COMM_WORLD);
463     sprintf(painCave.errMsg,
464             "DumpWriter error: atoms start index (%d) for "
469     }
470
471     sendError = 0;
472  <  mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
472  >  mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
473                       MPI_COMM_WORLD);
474
475     // receive the node's writeLines
476
477  <  for ( i = nodeAtomStart; i <= nodeAtomEnd, i++){
477  >  for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
478
479  <  mpiErr = MPI_Recv(&read_buffer,BUFFERSIZE,MPI_CHAR,procIndex,
480  <                    MPI_ANY_TAG,MPI_COMM_WORLD,istatus );
479  >  mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
480  >                    TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
481
482     finalOut << writeLine;
483     masterIndex++;
488
489     else if( worldRank == procIndex ){
490
491  <  nodeAtomStart = entry_plug->mpiSim->getMyAtomStart();
492  <  nodeAtomEnd = entry_plug->mpiSim->getMyAtomEnd();
491  >  nodeAtomsStart = mpiSim->getMyAtomStart();
492  >  nodeAtomsEnd = mpiSim->getMyAtomEnd();
493
494  <  mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,MPI_ANY_TAG,
494  >  mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
495                       MPI_COMM_WORLD);
496  <  mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,MPI_ANY_TAG,
496  >  mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
497                       MPI_COMM_WORLD);
498
499  <  mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,MPI_ANY_TAG,
499  >  mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
500                       MPI_COMM_WORLD, istatus);
501  <  if (sendError) mpiCheckpoint();
501  >  if (sendError) MPIcheckPoint();
502
503     // send current node's configuration line by line.
504
534     else
535     strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
536
537  <  mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,MPI_ANY_TAG,
537  >  mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
538                       MPI_COMM_WORLD);
539     }
540     }
541
542     sprintf(checkPointMsg,"Node %d sent dump configuration.",
543             procIndex);
544  <  mpiCheckPoint();
544  >  MPIcheckPoint();
545     }
546
547     if( worldRank == 0 ) finalOut.close();