   5      #ifdef IS_MPI
   6      #include <mpi.h>
   7      #include "mpiSimulation.hpp"
   8  +   #define TAKE_THIS_TAG 0
   9      #endif //is_mpi
  10
  11      #include "ReadWrite.hpp"
          ...
  17
  18      DumpWriter::DumpWriter( SimInfo* the_entry_plug ){
  19
  20  +     entry_plug = the_entry_plug;
  21  +
  22      #ifdef IS_MPI
  23        if(worldRank == 0 ){
  24      #endif // is_mpi
  25
  26  -     entry_plug = the_entry_plug;
  26  +
  27
  28        strcpy( outName, entry_plug->sampleName );
  29
  27  -     std::cerr << "Opening " << outName << " for dumping.\n";
  28  -
  30        outFile.open(outName, ios::out | ios::trunc );
  31
  32        if( !outFile ){
          ...
  42
  43      #ifdef IS_MPI
  44        }
  45  +
  46  +     sprintf( checkPointMsg,
  47  +              "Successfully opened output file for dumping.\n");
  48  +     MPIcheckPoint();
  49      #endif // is_mpi
  50      }
  51
  65      void DumpWriter::writeDump( double currentTime ){
  66
  67        const int BUFFERSIZE = 2000;
  68  -     char tempBuffer[500];
  68  +     char tempBuffer[BUFFERSIZE];
  69        char writeLine[BUFFERSIZE];
  70
  71        int i;
          ...
  86
  87        for( i=0; i<nAtoms; i++ ){
  88
  89  +
  90          sprintf( tempBuffer,
  91                   "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
  92                   atoms[i]->getType(),
          ...
 181        }
 182        outFile.flush();
 183      }
 184  +
 185  +   sprintf( checkPointMsg,
 186  +            "Successfully wrote node 0's dump configuration.\n");
 187  +   MPIcheckPoint();
 188
 189      for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
 190           procIndex++){
 191
 192        if( worldRank == 0 ){
 193
 194          mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
 195  -                         MPI_ANY_TAG,MPI_COMM_WORLD,istatus);
 195  +                         TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
 196
 197          mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
 198  -                         MPI_ANY_TAG,MPI_COMM_WORLD, istatus);
 198  +                         TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
 199
 200          // Make sure that where node 0 is writing matches where the
 201          // receiving node expects it to be.
 202
 203          if (masterIndex != nodeAtomsStart){
 204            sendError = 1;
 205  -         mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
 205  +         mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
 206                              MPI_COMM_WORLD);
 207            sprintf(painCave.errMsg,
 208                    "DumpWriter error: atoms start index (%d) for "
          ...
 211            painCave.isFatal = 1;
 212            simError();
 213          }
 214
 215          sendError = 0;
 216  -       mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
 216  +       mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
 217                            MPI_COMM_WORLD);
 218
 219          // receive the node's writeLines
 220
 221          for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
 222
 223            mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
 224  -                           MPI_ANY_TAG,MPI_COMM_WORLD,istatus );
 224  +                           TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
 225
 226            outFile << writeLine;
 227            masterIndex++;
 228          }
          ...
 232
 233        nodeAtomsStart = mpiSim->getMyAtomStart();
 234        nodeAtomsEnd = mpiSim->getMyAtomEnd();
 235
 236  -     mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,MPI_ANY_TAG,
 236  +     mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
 237                          MPI_COMM_WORLD);
 238  -     mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,MPI_ANY_TAG,
 238  +     mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
 239                          MPI_COMM_WORLD);
 240
 241  -     mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,MPI_ANY_TAG,
 241  +     sendError = -1;
 242  +     mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
 243                          MPI_COMM_WORLD, istatus);
 244  +
 245        if (sendError) MPIcheckPoint();
 246
 247        // send current node's configuration line by line.
 248
 249        for( i=0; i<nAtoms; i++ ){
 250
 251          sprintf( tempBuffer,
 252                   "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
 253                   atoms[i]->getType(),
          ...
 256                   atoms[i]->getZ(),
 257                   atoms[i]->get_vx(),
 258                   atoms[i]->get_vy(),
 259  -                atoms[i]->get_vz());
 259  +                atoms[i]->get_vz()); // check here.
 260          strcpy( writeLine, tempBuffer );
 261
 262          if( atoms[i]->isDirectional() ){
          ...
 278          else
 279            strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
 280
 281  -       mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,MPI_ANY_TAG,
 281  +       mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
 282                            MPI_COMM_WORLD);
 283        }
 284      }
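
The substantive change running through writeDump (and writeFinal below) is the tag argument. MPI_ANY_TAG is a wildcard that the MPI standard permits only on the receive side; as the tag of MPI_Send it is invalid, since send tags must be ordinary values between 0 and MPI_TAG_UB. Defining TAKE_THIS_TAG and using it on both ends makes every send legal and pairs each send deterministically with its intended receive. The new "sendError = -1;" before the receive at line 241 is in the same defensive spirit: the flag starts with a known-bad value rather than whatever happened to be on the stack. A minimal sketch of the tag rule (illustrative standalone code, not part of DumpWriter):

  #include <mpi.h>
  #include <cstdio>

  #define TAKE_THIS_TAG 0   // fixed tag, as in the diff

  int main( int argc, char* argv[] ){

    MPI_Init( &argc, &argv );

    int worldRank;
    MPI_Comm_rank( MPI_COMM_WORLD, &worldRank );

    int value;
    MPI_Status istatus;

    if( worldRank == 0 ){
      // A receive may use MPI_ANY_TAG as a wildcard, but matching the
      // sender's fixed tag keeps the pairing explicit.
      MPI_Recv( &value, 1, MPI_INT, 1, TAKE_THIS_TAG, MPI_COMM_WORLD,
                &istatus );
      printf( "node 0 got %d\n", value );
    }
    else if( worldRank == 1 ){
      value = 42;
      // A send must carry a real tag in [0, MPI_TAG_UB]; passing
      // MPI_ANY_TAG here is erroneous, which is what the diff fixes.
      MPI_Send( &value, 1, MPI_INT, 0, TAKE_THIS_TAG, MPI_COMM_WORLD );
    }

    MPI_Finalize();
    return 0;
  }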
 404
 405      masterIndex = 0;
 406
 395  -   std::cerr << "about to write node 0 aztoms. nAtoms = " << nAtoms << "\n";
 396  -
 407      for( i=0; i<nAtoms; i++ ){
 408
 409        sprintf( tempBuffer,
          ...
 448      if( worldRank == 0 ){
 449
 450        mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
 451  -                       MPI_ANY_TAG,MPI_COMM_WORLD,istatus);
 451  +                       TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
 452
 453        mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
 454  -                       MPI_ANY_TAG,MPI_COMM_WORLD, istatus);
 454  +                       TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
 455
 456        // Make sure that where node 0 is writing matches where the
 457        // receiving node expects it to be.
 458
 459        if (masterIndex != nodeAtomsStart){
 460          sendError = 1;
 461  -       mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
 461  +       mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
 462                            MPI_COMM_WORLD);
 463          sprintf(painCave.errMsg,
 464                  "DumpWriter error: atoms start index (%d) for "
          ...
 469        }
 470
 471        sendError = 0;
 472  -     mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
 472  +     mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
 473                          MPI_COMM_WORLD);
 474
 475        // receive the node's writeLines
          ...
 477        for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
 478
 479          mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
 480  -                         MPI_ANY_TAG,MPI_COMM_WORLD,istatus );
 480  +                         TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
 481
 482          finalOut << writeLine;
 483          masterIndex++;
          ...
 491      nodeAtomsStart = mpiSim->getMyAtomStart();
 492      nodeAtomsEnd = mpiSim->getMyAtomEnd();
 493
 494  -   mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,MPI_ANY_TAG,
 494  +   mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
 495                        MPI_COMM_WORLD);
 496  -   mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,MPI_ANY_TAG,
 496  +   mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
 497                        MPI_COMM_WORLD);
 498
 499  -   mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,MPI_ANY_TAG,
 499  +   mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
 500                        MPI_COMM_WORLD, istatus);
 501      if (sendError) MPIcheckPoint();
 502
          ...
 534        else
 535          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
 536
 537  -     mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,MPI_ANY_TAG,
 537  +     mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
 538                          MPI_COMM_WORLD);
 539      }
 540    }
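
Both functions patched above share one gather protocol: every node reports its atom index range to node 0, node 0 checks the range against its running write position (masterIndex) and answers with sendError, and only on a clean handshake does the node stream its formatted lines, one MPI message per atom. The sketch below reproduces that shape under toy assumptions; lineForAtom and the four-atoms-per-rank decomposition are hypothetical stand-ins, output goes to stdout instead of a file, and the real code aborts through simError()/MPIcheckPoint() instead of breaking out:

  #include <mpi.h>
  #include <cstdio>

  #define TAKE_THIS_TAG 0
  const int BUFFERSIZE = 2000;

  // Hypothetical stand-in for the per-atom formatting in DumpWriter.
  static void lineForAtom( int i, char* buf ){
    sprintf( buf, "atom %d\n", i );
  }

  int main( int argc, char* argv[] ){

    MPI_Init( &argc, &argv );

    int rank, nProcs;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &nProcs );

    // Toy decomposition: four atoms per rank, globally contiguous.
    int myStart = 4 * rank;
    int myEnd   = 4 * rank + 3;

    char writeLine[BUFFERSIZE];
    MPI_Status istatus;

    if( rank == 0 ){

      int masterIndex = 0;

      // node 0 writes its own atoms first
      for( int i = myStart; i <= myEnd; i++ ){
        lineForAtom( i, writeLine );
        fputs( writeLine, stdout );
        masterIndex++;
      }

      // then drains every other node in rank order
      for( int proc = 1; proc < nProcs; proc++ ){

        int nodeStart, nodeEnd;
        MPI_Recv( &nodeStart, 1, MPI_INT, proc, TAKE_THIS_TAG,
                  MPI_COMM_WORLD, &istatus );
        MPI_Recv( &nodeEnd, 1, MPI_INT, proc, TAKE_THIS_TAG,
                  MPI_COMM_WORLD, &istatus );

        // handshake: confirm the write position matches the node's range
        int sendError = (masterIndex != nodeStart) ? 1 : 0;
        MPI_Send( &sendError, 1, MPI_INT, proc, TAKE_THIS_TAG,
                  MPI_COMM_WORLD );
        if( sendError ) break;   // never taken with this toy decomposition;
                                 // the real code calls simError() here

        for( int i = nodeStart; i <= nodeEnd; i++ ){
          MPI_Recv( writeLine, BUFFERSIZE, MPI_CHAR, proc, TAKE_THIS_TAG,
                    MPI_COMM_WORLD, &istatus );
          fputs( writeLine, stdout );
          masterIndex++;
        }
      }
    }
    else {

      MPI_Send( &myStart, 1, MPI_INT, 0, TAKE_THIS_TAG, MPI_COMM_WORLD );
      MPI_Send( &myEnd,   1, MPI_INT, 0, TAKE_THIS_TAG, MPI_COMM_WORLD );

      int sendError = -1;   // known-bad value until node 0 answers
      MPI_Recv( &sendError, 1, MPI_INT, 0, TAKE_THIS_TAG,
                MPI_COMM_WORLD, &istatus );

      if( !sendError ){
        for( int i = myStart; i <= myEnd; i++ ){
          lineForAtom( i, writeLine );
          MPI_Send( writeLine, BUFFERSIZE, MPI_CHAR, 0, TAKE_THIS_TAG,
                    MPI_COMM_WORLD );
        }
      }
    }

    MPI_Finalize();
    return 0;
  }

Sending a full BUFFERSIZE buffer per atom is wasteful but keeps the receive size fixed, which is presumably also why the diff grows tempBuffer from 500 to BUFFERSIZE: a formatted line longer than 500 characters would otherwise overrun tempBuffer before it was ever copied into writeLine.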