+ #define _LARGEFILE_SOURCE64
  #define _FILE_OFFSET_BITS 64

  #include <string.h>
  #include <iostream>
  #include <fstream>
+ #include <algorithm>
+ #include <utility>

  #ifdef IS_MPI
  #include <mpi.h>
  #include "mpiSimulation.hpp"
- #define TAKE_THIS_TAG_CHAR 15
- #define TAKE_THIS_TAG_INT 20

  namespace dWrite{
-   void nodeZeroError( void );
-   void anonymousNodeDie( void );
+   void DieDieDie( void );
  }

  using namespace dWrite;

[... lines 19-28 unchanged ...]

    if(worldRank == 0 ){
  #endif // is_mpi

-     strcpy( outName, entry_plug->sampleName );
+     dumpFile.open(entry_plug->sampleName, ios::out | ios::trunc );

-     outFile.open(outName, ios::out | ios::trunc );
+     if( !dumpFile ){

-     if( !outFile ){
-
        sprintf( painCave.errMsg,
                 "Could not open \"%s\" for dump output.\n",
-                outName);
+                entry_plug->sampleName);
        painCave.isFatal = 1;
        simError();
      }

-     //outFile.setf( ios::scientific );
-
  #ifdef IS_MPI
    }

+   //sort the local atoms by global index
+   sortByGlobalIndex();
+
    sprintf( checkPointMsg,
             "Sucessfully opened output file for dumping.\n");
    MPIcheckPoint();

[... lines 52-57 unchanged ...]

    if(worldRank == 0 ){
  #endif // is_mpi

-     outFile.close();
+     dumpFile.close();

  #ifdef IS_MPI
    }
  #endif // is_mpi
  }

- void DumpWriter::writeDump( double currentTime ){
+ #ifdef IS_MPI

+ /**
+  * A hook function to load balancing
+  */
+
+ void DumpWriter::update(){
+   sortByGlobalIndex();
+ }
+
+ /**
+  * Auxiliary sorting function
+  */
+
+ bool indexSortingCriterion(const pair<int, int>& p1, const pair<int, int>& p2){
+   return p1.second < p2.second;
+ }
+
+ /**
+  * Sorting the local index by global index
+  */
+
+ void DumpWriter::sortByGlobalIndex(){
+   Atom** atoms = entry_plug->atoms;
+
+   indexArray.clear();
+
+   for(int i = 0; i < mpiSim->getMyNlocal();i++)
+     indexArray.push_back(make_pair(i, atoms[i]->getGlobalIndex()));
+
+   sort(indexArray.begin(), indexArray.end(), indexSortingCriterion);
+ }
+
+ #endif
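The block added above builds indexArray, a DumpWriter member (declared in the header, which is not part of this diff) holding (local index, global index) pairs sorted by global index. Later in writeFrame() each node walks this array with currentIndex, so atoms stream out in global-index order without the per-atom linear search over getGlobalIndex() that the removed code performed. A minimal, self-contained sketch of that pattern, with illustrative names only:

    // Sketch of the sort-by-global-index idea; not OOPSE code.
    #include <algorithm>
    #include <cstdio>
    #include <utility>
    #include <vector>

    using namespace std;

    // same comparison the diff adds as indexSortingCriterion()
    bool bySecond(const pair<int, int>& p1, const pair<int, int>& p2) {
      return p1.second < p2.second;
    }

    int main() {
      // pretend these are the global indices of this node's three local atoms
      int globalIndex[3] = { 42, 7, 19 };

      // (local index, global index) pairs, sorted by global index
      vector< pair<int, int> > indexArray;
      for (int i = 0; i < 3; i++)
        indexArray.push_back(make_pair(i, globalIndex[i]));
      sort(indexArray.begin(), indexArray.end(), bySecond);

      // Walking the sorted array visits local atoms in global-index order,
      // which is how writeFrame() advances currentIndex atom by atom.
      for (int k = 0; k < (int)indexArray.size(); k++)
        printf("global %d -> local %d\n", indexArray[k].second, indexArray[k].first);
      return 0;
    }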
+
+ void DumpWriter::writeDump(double currentTime){
+
+   ofstream finalOut;
+   vector<ofstream*> fileStreams;
+
+ #ifdef IS_MPI
+   if(worldRank == 0 ){
+ #endif
+     finalOut.open( entry_plug->finalName, ios::out | ios::trunc );
+     if( !finalOut ){
+       sprintf( painCave.errMsg,
+                "Could not open \"%s\" for final dump output.\n",
+                entry_plug->finalName );
+       painCave.isFatal = 1;
+       simError();
+     }
+ #ifdef IS_MPI
+   }
+ #endif // is_mpi
+
+   fileStreams.push_back(&finalOut);
+   fileStreams.push_back(&dumpFile);
+
+   writeFrame(fileStreams, currentTime);
+
+ #ifdef IS_MPI
+   finalOut.close();
+ #endif
+
+ }
+
+ void DumpWriter::writeFinal(double currentTime){
+
+   ofstream finalOut;
+   vector<ofstream*> fileStreams;
+
+ #ifdef IS_MPI
+   if(worldRank == 0 ){
+ #endif // is_mpi
+
+     finalOut.open( entry_plug->finalName, ios::out | ios::trunc );
+
+     if( !finalOut ){
+       sprintf( painCave.errMsg,
+                "Could not open \"%s\" for final dump output.\n",
+                entry_plug->finalName );
+       painCave.isFatal = 1;
+       simError();
+     }
+
+ #ifdef IS_MPI
+   }
+ #endif // is_mpi
+
+   fileStreams.push_back(&finalOut);
+   writeFrame(fileStreams, currentTime);
+
+ #ifdef IS_MPI
+   finalOut.close();
+ #endif
+
+ }
+
+ void DumpWriter::writeFrame( vector<ofstream*>& outFile, double currentTime ){
+
    const int BUFFERSIZE = 2000;
-   char tempBuffer[BUFFERSIZE];
+   const int MINIBUFFERSIZE = 100;
+
+   char tempBuffer[BUFFERSIZE];
    char writeLine[BUFFERSIZE];

-   int i;
+   int i, k;
+
  #ifdef IS_MPI
-   int j, which_node, done, which_atom, local_index;
+
+   /*********************************************************************
+    * Documentation? You want DOCUMENTATION?
+    *
+    * Why all the potatoes below?
+    *
+    * To make a long story short, the original version of DumpWriter
+    * worked in the most inefficient way possible. Node 0 would
+    * poke each of the node for an individual atom's formatted data
+    * as node 0 worked its way down the global index. This was particularly
+    * inefficient since the method blocked all processors at every atom
+    * (and did it twice!).
+    *
+    * An intermediate version of DumpWriter could be described from Node
+    * zero's perspective as follows:
+    *
+    *  1) Have 100 of your friends stand in a circle.
+    *  2) When you say go, have all of them start tossing potatoes at
+    *     you (one at a time).
+    *  3) Catch the potatoes.
+    *
+    * It was an improvement, but MPI has buffers and caches that could
+    * best be described in this analogy as "potato nets", so there's no
+    * need to block the processors atom-by-atom.
+    *
+    * This new and improved DumpWriter works in an even more efficient
+    * way:
+    *
+    *  1) Have 100 of your friend stand in a circle.
+    *  2) When you say go, have them start tossing 5-pound bags of
+    *     potatoes at you.
+    *  3) Once you've caught a friend's bag of potatoes,
+    *     toss them a spud to let them know they can toss another bag.
+    *
+    * How's THAT for documentation?
+    *
+    *********************************************************************/
+
+   int *potatoes;
+   int myPotato;
+
+   int nProc;
+   int j, which_node, done, which_atom, local_index, currentIndex;
+   double atomData6[6];
+   double atomData13[13];
+   int isDirectional;
+   char* atomTypeString;
+   char MPIatomTypeString[MINIBUFFERSIZE];
+
  #else //is_mpi
    int nAtoms = entry_plug->n_atoms;
  #endif //is_mpi
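Concretely, the handshake the comment above describes in potato terms works like this: every remote node keeps a counter ("potato") that it uses as the MPI tag for the three messages it sends per atom (type string, directionality flag, data array), node 0 mirrors those counters in potatoes[], and a counter is wrapped back to 0 (with an acknowledging send from node 0) before it could exceed the tag ceiling MAXTAG. A toy two-rank sketch of that tag-counter pattern, with made-up payloads and not taken from OOPSE:

    // Toy version of the tag-counter ("potato") exchange; illustrative only.
    // Rank 1 sends a few fixed-size records to rank 0, bumping the tag for
    // every message; both sides advance their counters in lockstep, so no
    // per-record handshake is needed.
    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      const int nRecords = 5;
      int tag = 0;                      // the "potato"

      if (rank == 0) {
        double record[6];
        MPI_Status status;
        for (int i = 0; i < nRecords; i++) {
          MPI_Recv(record, 6, MPI_DOUBLE, 1, tag, MPI_COMM_WORLD, &status);
          printf("record %d arrived with tag %d\n", i, tag);
          tag++;                        // mirror the sender's counter
        }
      } else if (rank == 1) {
        double record[6] = {0, 1, 2, 3, 4, 5};
        for (int i = 0; i < nRecords; i++) {
          MPI_Send(record, 6, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
          tag++;                        // same counter, kept in lockstep
        }
      }

      MPI_Finalize();
      return 0;
    }

In writeFrame() itself three messages go out per atom, so each counter advances by three per atom, which is why the wrap test below is written as "potato + 3 >= MAXTAG".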
[... lines 229-231 unchanged ...]

    Atom** atoms = entry_plug->atoms;
    double pos[3], vel[3];

-
-   // write current frame to the eor file
-
-   this->writeFinal( currentTime );
-
  #ifndef IS_MPI
+
+   for(k = 0; k < outFile.size(); k++){
+     *outFile[k] << nAtoms << "\n";

-   outFile << nAtoms << "\n";
+     *outFile[k] << currentTime << ";\t"
+                 << entry_plug->Hmat[0][0] << "\t"
+                 << entry_plug->Hmat[1][0] << "\t"
+                 << entry_plug->Hmat[2][0] << ";\t"
+
+                 << entry_plug->Hmat[0][1] << "\t"
+                 << entry_plug->Hmat[1][1] << "\t"
+                 << entry_plug->Hmat[2][1] << ";\t"

-   outFile << currentTime << ";\t"
-           << entry_plug->Hmat[0][0] << "\t"
-           << entry_plug->Hmat[1][0] << "\t"
-           << entry_plug->Hmat[2][0] << ";\t"
+                 << entry_plug->Hmat[0][2] << "\t"
+                 << entry_plug->Hmat[1][2] << "\t"
+                 << entry_plug->Hmat[2][2] << ";";

-           << entry_plug->Hmat[0][1] << "\t"
-           << entry_plug->Hmat[1][1] << "\t"
-           << entry_plug->Hmat[2][1] << ";\t"
-
-           << entry_plug->Hmat[0][2] << "\t"
-           << entry_plug->Hmat[1][2] << "\t"
-           << entry_plug->Hmat[2][2] << ";";
-   //write out additional parameters, such as chi and eta
-   outFile << entry_plug->the_integrator->getAdditionalParameters();
-   outFile << endl;
-
+     //write out additional parameters, such as chi and eta
+     *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl;
+   }
+
    for( i=0; i<nAtoms; i++ ){

      atoms[i]->getPos(pos);

[... lines 260-288 unchanged ...]

      else
        strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

-     outFile << writeLine;
+     for(k = 0; k < outFile.size(); k++)
+       *outFile[k] << writeLine;
    }
-   outFile.flush();

  #else // is_mpi

-   // first thing first, suspend fatalities.
-   painCave.isEventLoop = 1;
+   /* code to find maximum tag value */
+
+   int *tagub, flag, MAXTAG;
+   MPI_Attr_get(MPI_COMM_WORLD, MPI_TAG_UB, &tagub, &flag);
+   if (flag) {
+     MAXTAG = *tagub;
+   } else {
+     MAXTAG = 32767;
+   }
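The added block above asks the MPI implementation for the largest tag value it supports (the standard only guarantees 32767, hence the fallback), so the potato counters can be wrapped before they overflow it. MPI_Attr_get is the MPI-1 spelling of this query; a sketch of the same lookup through the newer MPI_Comm_get_attr call, for illustration only:

    // Same MPI_TAG_UB query via the MPI-2 attribute interface; illustrative only.
    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int *tagub, flag;
      int maxtag = 32767;   // minimum value the MPI standard guarantees
      MPI_Comm_get_attr(MPI_COMM_WORLD, MPI_TAG_UB, &tagub, &flag);
      if (flag) maxtag = *tagub;
      printf("largest usable tag: %d\n", maxtag);
      MPI_Finalize();
      return 0;
    }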

-   int myStatus; // 1 = wakeup & success; 0 = error; -1 = AllDone
    int haveError;

    MPI_Status istatus;

[... lines 311-312 unchanged ...]

    // write out header and node 0's coordinates

    if( worldRank == 0 ){
-     outFile << mpiSim->getTotAtoms() << "\n";

-     outFile << currentTime << ";\t"
-             << entry_plug->Hmat[0][0] << "\t"
-             << entry_plug->Hmat[1][0] << "\t"
-             << entry_plug->Hmat[2][0] << ";\t"
+     // Node 0 needs a list of the magic potatoes for each processor;

-             << entry_plug->Hmat[0][1] << "\t"
-             << entry_plug->Hmat[1][1] << "\t"
-             << entry_plug->Hmat[2][1] << ";\t"
+     nProc = mpiSim->getNumberProcessors();
+     potatoes = new int[nProc];

-             << entry_plug->Hmat[0][2] << "\t"
-             << entry_plug->Hmat[1][2] << "\t"
-             << entry_plug->Hmat[2][2] << ";";
+     //write out the comment lines
+     for (i = 0; i < nProc; i++)
+       potatoes[i] = 0;
+
+     for(k = 0; k < outFile.size(); k++){
+       *outFile[k] << mpiSim->getTotAtoms() << "\n";

-     outFile << entry_plug->the_integrator->getAdditionalParameters();
-     outFile << endl;
-     outFile.flush();
+       *outFile[k] << currentTime << ";\t"
+                   << entry_plug->Hmat[0][0] << "\t"
+                   << entry_plug->Hmat[1][0] << "\t"
+                   << entry_plug->Hmat[2][0] << ";\t"
+
+                   << entry_plug->Hmat[0][1] << "\t"
+                   << entry_plug->Hmat[1][1] << "\t"
+                   << entry_plug->Hmat[2][1] << ";\t"
+
+                   << entry_plug->Hmat[0][2] << "\t"
+                   << entry_plug->Hmat[1][2] << "\t"
+                   << entry_plug->Hmat[2][2] << ";";
+
+       *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl;
+     }
+
+     currentIndex = 0;
+
      for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {
+
        // Get the Node number which has this atom;
-
+
        which_node = AtomToProcMap[i];
+
+       if (which_node != 0) {

-       if (which_node == 0 ) {
+         if (potatoes[which_node] + 3 >= MAXTAG) {
+           // The potato was going to exceed the maximum value,
+           // so wrap this processor potato back to 0:

-         haveError = 0;
-         which_atom = i;
-         local_index=-1;
-         for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
-           if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
+           potatoes[which_node] = 0;
+           MPI_Send(0, 1, MPI_INT, which_node, 0, MPI_COMM_WORLD);
+
          }
-         if (local_index != -1) {
-           //format the line

+         myPotato = potatoes[which_node];
+
+         MPI_Recv(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, which_node,
+                  myPotato, MPI_COMM_WORLD, &istatus);
+
+         atomTypeString = MPIatomTypeString;
+
+         myPotato++;
+
+         MPI_Recv(&isDirectional, 1, MPI_INT, which_node,
+                  myPotato, MPI_COMM_WORLD, &istatus);
+
+         myPotato++;
+
+         if (isDirectional) {
+           MPI_Recv(atomData13, 13, MPI_DOUBLE, which_node,
+                    myPotato, MPI_COMM_WORLD, &istatus);
+         } else {
+           MPI_Recv(atomData6, 6, MPI_DOUBLE, which_node,
+                    myPotato, MPI_COMM_WORLD, &istatus);
+         }
+
+         myPotato++;
+         potatoes[which_node] = myPotato;
+
+       } else {
+
+         haveError = 0;
+         which_atom = i;
+
+         local_index = indexArray[currentIndex].first;
+
+         if (which_atom == indexArray[currentIndex].second) {
+
+           atomTypeString = atoms[local_index]->getType();
+
            atoms[local_index]->getPos(pos);
-           atoms[local_index]->getVel(vel);
+           atoms[local_index]->getVel(vel);
+
+           atomData6[0] = pos[0];
+           atomData6[1] = pos[1];
+           atomData6[2] = pos[2];

-           sprintf( tempBuffer,
-                    "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
-                    atoms[local_index]->getType(),
-                    pos[0],
-                    pos[1],
-                    pos[2],
-                    vel[0],
-                    vel[1],
-                    vel[2]); // check here.
-           strcpy( writeLine, tempBuffer );
+           atomData6[3] = vel[0];
+           atomData6[4] = vel[1];
+           atomData6[5] = vel[2];
+
+           isDirectional = 0;

            if( atoms[local_index]->isDirectional() ){

+             isDirectional = 1;
+
              dAtom = (DirectionalAtom *)atoms[local_index];
              dAtom->getQ( q );

-             sprintf( tempBuffer,
-                      "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
-                      q[0],
-                      q[1],
-                      q[2],
-                      q[3],
-                      dAtom->getJx(),
-                      dAtom->getJy(),
-                      dAtom->getJz());
-             strcat( writeLine, tempBuffer );
-
-           }
-           else
-             strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
-         }
-         else {
+             for (int j = 0; j < 6 ; j++)
+               atomData13[j] = atomData6[j];
+
+             atomData13[6] = q[0];
+             atomData13[7] = q[1];
+             atomData13[8] = q[2];
+             atomData13[9] = q[3];
+
+             atomData13[10] = dAtom->getJx();
+             atomData13[11] = dAtom->getJy();
+             atomData13[12] = dAtom->getJz();
+           }
+
+         } else {
            sprintf(painCave.errMsg,
-                   "Atom %d not found on processor %d\n",
-                   i, worldRank );
+                   "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n",
+                   which_atom, worldRank, currentIndex, local_index );
            haveError= 1;
            simError();
          }
-
-         if(haveError) nodeZeroError();
-
+
+         if(haveError) DieDieDie();
+
+         currentIndex++;
        }
-       else {
-         myStatus = 1;
-         MPI_Send(&myStatus, 1, MPI_INT, which_node,
-                  TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
-         MPI_Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT,
-                  MPI_COMM_WORLD);
-         MPI_Recv(writeLine, BUFFERSIZE, MPI_CHAR, which_node,
-                  TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD, &istatus);
-         MPI_Recv(&myStatus, 1, MPI_INT, which_node,
-                  TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);
-
-         if(!myStatus) nodeZeroError();
-
+       // If we've survived to here, format the line:
+
+       if (!isDirectional) {
+
+         sprintf( writeLine,
+                  "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
+                  atomTypeString,
+                  atomData6[0],
+                  atomData6[1],
+                  atomData6[2],
+                  atomData6[3],
+                  atomData6[4],
+                  atomData6[5]);
+
+         strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
+
+       } else {
+
+         sprintf( writeLine,
+                  "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
+                  atomTypeString,
+                  atomData13[0],
+                  atomData13[1],
+                  atomData13[2],
+                  atomData13[3],
+                  atomData13[4],
+                  atomData13[5],
+                  atomData13[6],
+                  atomData13[7],
+                  atomData13[8],
+                  atomData13[9],
+                  atomData13[10],
+                  atomData13[11],
+                  atomData13[12]);
+
        }
-
-       outFile << writeLine;
-       outFile.flush();
+
+       for(k = 0; k < outFile.size(); k++)
+         *outFile[k] << writeLine;
      }
-
-     // kill everyone off:
-     myStatus = -1;
-     for (j = 1; j < mpiSim->getNumberProcessors(); j++) {
-       MPI_Send(&myStatus, 1, MPI_INT, j,
-                TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
-     }
-
+
+     for(k = 0; k < outFile.size(); k++)
+       outFile[k]->flush();
+
+     sprintf( checkPointMsg,
+              "Sucessfully took a dump.\n");
+
+     MPIcheckPoint();
+
+     delete[] potatoes;
+
    } else {

-     done = 0;
-     while (!done) {
+     // worldRank != 0, so I'm a remote node.

-       MPI_Recv(&myStatus, 1, MPI_INT, 0,
-                TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);
+     // Set my magic potato to 0:

-       if(!myStatus) anonymousNodeDie();
-
-       if(myStatus < 0) break;
-
-       MPI_Recv(&which_atom, 1, MPI_INT, 0,
-                TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);
+     myPotato = 0;
+     currentIndex = 0;
+
+     for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {
+
+       // Am I the node which has this atom?
+
+       if (AtomToProcMap[i] == worldRank) {

-         myStatus = 1;
-         local_index=-1;
-         for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
-           if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
-         }
-         if (local_index != -1) {
-           //format the line
+         if (myPotato + 3 >= MAXTAG) {
+
+           // The potato was going to exceed the maximum value,
+           // so wrap this processor potato back to 0 (and block until
+           // node 0 says we can go:
+
+           MPI_Recv(&myPotato, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &istatus);
+
+         }
+         which_atom = i;

-           atoms[local_index]->getPos(pos);
-           atoms[local_index]->getVel(vel);
+         local_index = indexArray[currentIndex].first;
+
+         if (which_atom == indexArray[currentIndex].second) {
+
+           atomTypeString = atoms[local_index]->getType();
+
+           atoms[local_index]->getPos(pos);
+           atoms[local_index]->getVel(vel);
+
+           atomData6[0] = pos[0];
+           atomData6[1] = pos[1];
+           atomData6[2] = pos[2];

-           sprintf( tempBuffer,
-                    "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
-                    atoms[local_index]->getType(),
-                    pos[0],
-                    pos[1],
-                    pos[2],
-                    vel[0],
-                    vel[1],
-                    vel[2]); // check here.
-           strcpy( writeLine, tempBuffer );
+           atomData6[3] = vel[0];
+           atomData6[4] = vel[1];
+           atomData6[5] = vel[2];
+
+           isDirectional = 0;

-           if( atoms[local_index]->isDirectional() ){
+           if( atoms[local_index]->isDirectional() ){

-             dAtom = (DirectionalAtom *)atoms[local_index];
-             dAtom->getQ( q );
+             isDirectional = 1;
+
+             dAtom = (DirectionalAtom *)atoms[local_index];
+             dAtom->getQ( q );
+
+             for (int j = 0; j < 6 ; j++)
+               atomData13[j] = atomData6[j];
+
+             atomData13[6] = q[0];
+             atomData13[7] = q[1];
+             atomData13[8] = q[2];
+             atomData13[9] = q[3];
+
+             atomData13[10] = dAtom->getJx();
+             atomData13[11] = dAtom->getJy();
+             atomData13[12] = dAtom->getJz();
+           }

-             sprintf( tempBuffer,
-                      "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
-                      q[0],
-                      q[1],
-                      q[2],
-                      q[3],
-                      dAtom->getJx(),
-                      dAtom->getJy(),
-                      dAtom->getJz());
-             strcat( writeLine, tempBuffer );
+         } else {
+           sprintf(painCave.errMsg,
+                   "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n",
+                   which_atom, worldRank, currentIndex, local_index );
+           haveError= 1;
+           simError();
          }
-         else{
-           strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
-         }
-         }
-         else {
-           sprintf(painCave.errMsg,
-                   "Atom %d not found on processor %d\n",
-                   which_atom, worldRank );
-           myStatus = 0;
-           simError();
+
+         strncpy(MPIatomTypeString, atomTypeString, MINIBUFFERSIZE);

-           strcpy( writeLine, "Hello, I'm an error.\n");
-         }
+         // null terminate the string before sending (just in case):
+         MPIatomTypeString[MINIBUFFERSIZE-1] = '\0';

-         MPI_Send(writeLine, BUFFERSIZE, MPI_CHAR, 0,
-                  TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD);
-         MPI_Send( &myStatus, 1, MPI_INT, 0,
-                   TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
-       }
-     }
-     outFile.flush();
-     sprintf( checkPointMsg,
-              "Sucessfully took a dump.\n");
-     MPIcheckPoint();
+         MPI_Send(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, 0,
+                  myPotato, MPI_COMM_WORLD);
+
+         myPotato++;

-     // last thing last, enable fatalities.
-     painCave.isEventLoop = 0;
+         MPI_Send(&isDirectional, 1, MPI_INT, 0,
+                  myPotato, MPI_COMM_WORLD);
+
+         myPotato++;
+
+         if (isDirectional) {

- #endif // is_mpi
- }
+           MPI_Send(atomData13, 13, MPI_DOUBLE, 0,
+                    myPotato, MPI_COMM_WORLD);
+
+         } else {

- void DumpWriter::writeFinal(double finalTime){
-
-   char finalName[500];
-   ofstream finalOut;
-
-   const int BUFFERSIZE = 2000;
-   char tempBuffer[BUFFERSIZE];
-   char writeLine[BUFFERSIZE];
-
-   double q[4];
-   DirectionalAtom* dAtom;
-   Atom** atoms = entry_plug->atoms;
-   int i;
- #ifdef IS_MPI
-   int j, which_node, done, which_atom, local_index;
- #else //is_mpi
-   int nAtoms = entry_plug->n_atoms;
- #endif //is_mpi
-
-   double pos[3], vel[3];
-
- #ifdef IS_MPI
-   if(worldRank == 0 ){
- #endif // is_mpi
-
-     strcpy( finalName, entry_plug->finalName );
-
-     finalOut.open( finalName, ios::out | ios::trunc );
-     if( !finalOut ){
-       sprintf( painCave.errMsg,
-                "Could not open \"%s\" for final dump output.\n",
-                finalName );
-       painCave.isFatal = 1;
-       simError();
-     }
-
-     // finalOut.setf( ios::scientific );
-
- #ifdef IS_MPI
-   }
-
-   sprintf(checkPointMsg,"Opened file for final configuration\n");
-   MPIcheckPoint();
-
- #endif //is_mpi
-
-
- #ifndef IS_MPI
-
-   finalOut << nAtoms << "\n";
-
-   finalOut << finalTime << ";\t"
-            << entry_plug->Hmat[0][0] << "\t"
-            << entry_plug->Hmat[1][0] << "\t"
-            << entry_plug->Hmat[2][0] << ";\t"
-
-            << entry_plug->Hmat[0][1] << "\t"
-            << entry_plug->Hmat[1][1] << "\t"
-            << entry_plug->Hmat[2][1] << ";\t"
-
-            << entry_plug->Hmat[0][2] << "\t"
-            << entry_plug->Hmat[1][2] << "\t"
-            << entry_plug->Hmat[2][2] << ";";
-
-   //write out additional parameters, such as chi and eta
-   finalOut << entry_plug->the_integrator->getAdditionalParameters();
-   finalOut << endl;
-
-   for( i=0; i<nAtoms; i++ ){
-
-     atoms[i]->getPos(pos);
-     atoms[i]->getVel(vel);
-
-     sprintf( tempBuffer,
-              "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
-              atoms[i]->getType(),
-              pos[0],
-              pos[1],
-              pos[2],
-              vel[0],
-              vel[1],
-              vel[2]);
-     strcpy( writeLine, tempBuffer );
-
-     if( atoms[i]->isDirectional() ){
-
-       dAtom = (DirectionalAtom *)atoms[i];
-       dAtom->getQ( q );
-
-       sprintf( tempBuffer,
-                "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
-                q[0],
-                q[1],
-                q[2],
-                q[3],
-                dAtom->getJx(),
-                dAtom->getJy(),
-                dAtom->getJz());
-       strcat( writeLine, tempBuffer );
-     }
-     else
-       strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
-
-     finalOut << writeLine;
-   }
-   finalOut.flush();
-   finalOut.close();
-
- #else // is_mpi
-
-   // first thing first, suspend fatalities.
-   painCave.isEventLoop = 1;
-
-   int myStatus; // 1 = wakeup & success; 0 = error; -1 = AllDone
-   int haveError;
-
-   MPI_Status istatus;
-   int *AtomToProcMap = mpiSim->getAtomToProcMap();
-
-   // write out header and node 0's coordinates
-
-   haveError = 0;
-   if( worldRank == 0 ){
-     finalOut << mpiSim->getTotAtoms() << "\n";
-
-     finalOut << finalTime << ";\t"
-              << entry_plug->Hmat[0][0] << "\t"
-              << entry_plug->Hmat[1][0] << "\t"
-              << entry_plug->Hmat[2][0] << ";\t"
-
-              << entry_plug->Hmat[0][1] << "\t"
-              << entry_plug->Hmat[1][1] << "\t"
-              << entry_plug->Hmat[2][1] << ";\t"
-
-              << entry_plug->Hmat[0][2] << "\t"
-              << entry_plug->Hmat[1][2] << "\t"
-              << entry_plug->Hmat[2][2] << ";";
-
-     //write out additional parameters, such as chi and eta
-     finalOut << entry_plug->the_integrator->getAdditionalParameters();
-     finalOut << endl;
-
-     for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {
-       // Get the Node number which has this molecule:
-
-       which_node = AtomToProcMap[i];
-
-       if (which_node == mpiSim->getMyNode()) {
-
-         which_atom = i;
-         local_index=-1;
-         for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
-           if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
+           MPI_Send(atomData6, 6, MPI_DOUBLE, 0,
+                    myPotato, MPI_COMM_WORLD);
          }
-         if (local_index != -1) {

-           atoms[local_index]->getPos(pos);
-           atoms[local_index]->getVel(vel);
-
-           sprintf( tempBuffer,
-                    "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
-                    atoms[local_index]->getType(),
-                    pos[0],
-                    pos[1],
-                    pos[2],
-                    vel[0],
-                    vel[1],
-                    vel[2]);
-           strcpy( writeLine, tempBuffer );
-
-           if( atoms[local_index]->isDirectional() ){
-
-             dAtom = (DirectionalAtom *)atoms[local_index];
-             dAtom->getQ( q );
-
-             sprintf( tempBuffer,
-                      "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
-                      q[0],
-                      q[1],
-                      q[2],
-                      q[3],
-                      dAtom->getJx(),
-                      dAtom->getJy(),
-                      dAtom->getJz());
-             strcat( writeLine, tempBuffer );
-           }
-           else
-             strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
-         }
-         else {
-           sprintf(painCave.errMsg,
-                   "Atom %d not found on processor %d\n",
-                   i, worldRank );
-           haveError= 1;
-           simError();
-         }
-
-         if(haveError) nodeZeroError();
-
+         myPotato++;
+         currentIndex++;
        }
-       else {
-
-         myStatus = 1;
-         MPI_Send(&myStatus, 1, MPI_INT, which_node,
-                  TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
-         MPI_Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT,
-                  MPI_COMM_WORLD);
-         MPI_Recv(writeLine, BUFFERSIZE, MPI_CHAR, which_node,
-                  TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD, &istatus);
-         MPI_Recv(&myStatus, 1, MPI_INT, which_node,
-                  TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);
-
-         if(!myStatus) nodeZeroError();
-       }
-
-       finalOut << writeLine;
      }

-     // kill everyone off:
-     myStatus = -1;
-     for (j = 1; j < mpiSim->getNumberProcessors(); j++) {
-       MPI_Send(&myStatus, 1, MPI_INT, j,
-                TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
-     }
-
-   } else {
-
-     done = 0;
-     while (!done) {
-
-       MPI_Recv(&myStatus, 1, MPI_INT, 0,
-                TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);
-
-       if(!myStatus) anonymousNodeDie();
-
-       if(myStatus < 0) break;
-
-       MPI_Recv(&which_atom, 1, MPI_INT, 0,
-                TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);
-
-       myStatus = 1;
-       local_index=-1;
-       for (j=0; j < mpiSim->getMyNlocal(); j++) {
-         if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
-       }
-       if (local_index != -1) {
-
-         atoms[local_index]->getPos(pos);
-         atoms[local_index]->getVel(vel);
-
-         //format the line
-         sprintf( tempBuffer,
-                  "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
-                  atoms[local_index]->getType(),
-                  pos[0],
-                  pos[1],
-                  pos[2],
-                  vel[0],
-                  vel[1],
-                  vel[2]); // check here.
-         strcpy( writeLine, tempBuffer );
-
-         if( atoms[local_index]->isDirectional() ){
-
-           dAtom = (DirectionalAtom *)atoms[local_index];
-           dAtom->getQ( q );
-
-           sprintf( tempBuffer,
-                    "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
-                    q[0],
-                    q[1],
-                    q[2],
-                    q[3],
-                    dAtom->getJx(),
-                    dAtom->getJy(),
-                    dAtom->getJz());
-           strcat( writeLine, tempBuffer );
-         }
-         else{
-           strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
-         }
-       }
-       else {
-         sprintf(painCave.errMsg,
-                 "Atom %d not found on processor %d\n",
-                 which_atom, worldRank );
-         myStatus = 0;
-         simError();
-
-         strcpy( writeLine, "Hello, I'm an error.\n");
-       }
-
-       MPI_Send(writeLine, BUFFERSIZE, MPI_CHAR, 0,
-                TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD);
-       MPI_Send( &myStatus, 1, MPI_INT, 0,
-                 TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
-     }
-   }
-   finalOut.flush();
-   sprintf( checkPointMsg,
-            "Sucessfully took a dump.\n");
-   MPIcheckPoint();
-
-   if( worldRank == 0 ) finalOut.close();
+     sprintf( checkPointMsg,
+              "Sucessfully took a dump.\n");
+     MPIcheckPoint();
+
+   }
+
  #endif // is_mpi
  }

-
-
  #ifdef IS_MPI

  // a couple of functions to let us escape the write loop

- void dWrite::nodeZeroError( void ){
-   int j, myStatus;
+ void dWrite::DieDieDie( void ){

-   myStatus = 0;
-   for (j = 0; j < mpiSim->getNumberProcessors(); j++) {
-     MPI_Send( &myStatus, 1, MPI_INT, j,
-               TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
-   }
-
-
    MPI_Finalize();
    exit (0);
-
  }

- void dWrite::anonymousNodeDie( void ){
-
-   MPI_Finalize();
-   exit (0);
- }
-
  #endif //is_mpi