-#include <cstring>
+#define _FILE_OFFSET_BITS 64
+
+#include <string.h>
 #include <iostream>
 #include <fstream>
+#include <algorithm>
+#include <utility>

 #ifdef IS_MPI
 #include <mpi.h>
-#include <mpi++.h>
 #include "mpiSimulation.hpp"
-#define TAKE_THIS_TAG 1
+
+namespace dWrite{
+  void DieDieDie( void );
+}
+
+using namespace dWrite;
 #endif //is_mpi

 #include "ReadWrite.hpp"
...
 #ifdef IS_MPI
   if(worldRank == 0 ){
 #endif // is_mpi
-
-
-
-    strcpy( outName, entry_plug->sampleName );
-
-    outFile.open(outName, ios::out | ios::trunc );
-
-    if( !outFile ){
-
+
+    dumpFile.open(entry_plug->sampleName, ios::out | ios::trunc );
+
+    if( !dumpFile ){
+
       sprintf( painCave.errMsg,
                "Could not open \"%s\" for dump output.\n",
-               outName);
+               entry_plug->sampleName);
       painCave.isFatal = 1;
       simError();
     }
-
-    //outFile.setf( ios::scientific );

 #ifdef IS_MPI
   }

+  //sort the local atoms by global index
+  sortByGlobalIndex();
+
   sprintf( checkPointMsg,
            "Sucessfully opened output file for dumping.\n");
   MPIcheckPoint();
...
   if(worldRank == 0 ){
 #endif // is_mpi

-    outFile.close();
+    dumpFile.close();

 #ifdef IS_MPI
   }
 #endif // is_mpi
 }

-void DumpWriter::writeDump( double currentTime ){
+#ifdef IS_MPI
+
+/**
+ * A hook function to load balancing
+ */
+
+void DumpWriter::update(){
+  sortByGlobalIndex();
+}

-  const int BUFFERSIZE = 2000;
-  char tempBuffer[BUFFERSIZE];
-  char writeLine[BUFFERSIZE];
+/**
+ * Auxiliary sorting function
+ */
+
+bool indexSortingCriterion(const pair<int, int>& p1, const pair<int, int>& p2){
+  return p1.second < p2.second;
+}

-  int i, j, which_node, done, game_over, which_atom, local_index;
-  double q[4];
-  DirectionalAtom* dAtom;
-  int nAtoms = entry_plug->n_atoms;
+/**
+ * Sorting the local index by global index
+ */
+
+void DumpWriter::sortByGlobalIndex(){
   Atom** atoms = entry_plug->atoms;
-
+
+  indexArray.clear();
+
+  for(int i = 0; i < mpiSim->getMyNlocal();i++)
+    indexArray.push_back(make_pair(i, atoms[i]->getGlobalIndex()));
+
+  sort(indexArray.begin(), indexArray.end(), indexSortingCriterion);

-#ifndef IS_MPI
+  //for (int i = 0; i < mpiSim->getMyNlocal(); i++) {
+  //  printf("node %d has global %d at local %d\n", worldRank, indexArray[i].second, indexArray[i].first);
+  //}

-  outFile << nAtoms << "\n";
-
-  outFile << currentTime << "\t"
-          << entry_plug->box_x << "\t"
-          << entry_plug->box_y << "\t"
-          << entry_plug->box_z << "\n";
-
-  for( i=0; i<nAtoms; i++ ){
-
+}

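Aside (not part of the patch): the new sortByGlobalIndex()/indexSortingCriterion pair builds a table of (local index, global index) pairs and orders it by the global index, so each processor can later walk its atoms in global order with a single cursor instead of searching for every atom. A minimal standalone sketch of that idiom, using made-up data rather than the mpiSim/Atom API:

// sketch only: sort a (local, global) index table by global index,
// then walk it in global order with one cursor
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

using namespace std;

static bool bySecond(const pair<int, int>& a, const pair<int, int>& b){
  return a.second < b.second;   // same criterion as indexSortingCriterion
}

int main(){
  // pretend these are (local index, global index) pairs on one processor
  vector< pair<int, int> > indexTable;
  indexTable.push_back(make_pair(0, 42));
  indexTable.push_back(make_pair(1, 7));
  indexTable.push_back(make_pair(2, 19));

  sort(indexTable.begin(), indexTable.end(), bySecond);

  for(unsigned int cursor = 0; cursor < indexTable.size(); cursor++)
    printf("global %d lives at local %d\n",
           indexTable[cursor].second, indexTable[cursor].first);
  return 0;
}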
-    sprintf( tempBuffer,
-             "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
-             atoms[i]->getType(),
-             atoms[i]->getX(),
-             atoms[i]->getY(),
-             atoms[i]->getZ(),
-             atoms[i]->get_vx(),
-             atoms[i]->get_vy(),
-             atoms[i]->get_vz());
-    strcpy( writeLine, tempBuffer );
+#endif

-    if( atoms[i]->isDirectional() ){
-
-      dAtom = (DirectionalAtom *)atoms[i];
-      dAtom->getQ( q );
-
-      sprintf( tempBuffer,
-               "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
-               q[0],
-               q[1],
-               q[2],
-               q[3],
-               dAtom->getJx(),
-               dAtom->getJy(),
-               dAtom->getJz());
-      strcat( writeLine, tempBuffer );
+void DumpWriter::writeDump(double currentTime){
+
+  ofstream finalOut;
+  vector<ofstream*> fileStreams;
+
+#ifdef IS_MPI
+  if(worldRank == 0 ){
+#endif
+    finalOut.open( entry_plug->finalName, ios::out | ios::trunc );
+    if( !finalOut ){
+      sprintf( painCave.errMsg,
+               "Could not open \"%s\" for final dump output.\n",
+               entry_plug->finalName );
+      painCave.isFatal = 1;
+      simError();
     }
-    else
-      strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
-
-    outFile << writeLine;
+#ifdef IS_MPI
   }
-  outFile.flush();
+#endif // is_mpi

-#else // is_mpi
+  fileStreams.push_back(&finalOut);
+  fileStreams.push_back(&dumpFile);

-  MPI::Status istatus;
-  int *AtomToProcMap = mpiSim->getAtomToProcMap();
-
-  // write out header and node 0's coordinates
-
-  if( worldRank == 0 ){
-    outFile << mpiSim->getTotAtoms() << "\n";
-
-    outFile << currentTime << "\t"
-            << entry_plug->box_x << "\t"
-            << entry_plug->box_y << "\t"
-            << entry_plug->box_z << "\n";
-    outFile.flush();
-    for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {
-      // Get the Node number which has this atom;
-
-      which_node = AtomToProcMap[i];
-
-      if (which_node == mpiSim->getMyNode()) {
-        sprintf( tempBuffer,
-                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
-                 atoms[i]->getType(),
-                 atoms[i]->getX(),
-                 atoms[i]->getY(),
-                 atoms[i]->getZ(),
-                 atoms[i]->get_vx(),
-                 atoms[i]->get_vy(),
-                 atoms[i]->get_vz());
-        strcpy( writeLine, tempBuffer );
-
-        if( atoms[i]->isDirectional() ){
-
-          dAtom = (DirectionalAtom *)atoms[i];
-          dAtom->getQ( q );
-
-          sprintf( tempBuffer,
-                   "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
-                   q[0],
-                   q[1],
-                   q[2],
-                   q[3],
-                   dAtom->getJx(),
-                   dAtom->getJy(),
-                   dAtom->getJz());
-          strcat( writeLine, tempBuffer );
-        }
-        else
-          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
-
-      } else {
-
-        std::cerr << "node 0: sending node " << which_node << " request for atom " << i << "\n";
-        MPI::COMM_WORLD.Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG);
-        std::cerr << "node 0: sent!\n";
-        MPI::COMM_WORLD.Recv(writeLine, BUFFERSIZE, MPI_CHAR, which_node,
-                             TAKE_THIS_TAG, istatus);
-        std::cerr << "node 0: got this line: " << writeLine;
-      }
-
-      outFile << writeLine;
-      outFile.flush();
-    }
-
-    // kill everyone off:
-    game_over = -1;
-    for (j = 0; j < mpiSim->getNumberProcessors(); j++) {
-      MPI::COMM_WORLD.Send(&game_over, 1, MPI_INT, j, TAKE_THIS_TAG);
-    }
+  writeFrame(fileStreams, currentTime);

-  } else {
-
-    done = 0;
-    while (!done) {
-      std::cerr << "node: " << mpiSim->getMyNode() << " Waiting for receive \n";
-      MPI::COMM_WORLD.Recv(&which_atom, 1, MPI_INT, 0,
-                           TAKE_THIS_TAG, istatus);
-      std::cerr << "node: " << mpiSim->getMyNode() << " got request for atom " << which_atom << "\n";
-      if (which_atom == -1) {
-        done=1;
-        continue;
-      } else {
-        local_index=-1;
-        for (j=0; j < mpiSim->getMyNlocal(); j++) {
-          if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
-        }
-        if (local_index != -1) {
-          //format the line
-          sprintf( tempBuffer,
-                   "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
-                   atoms[local_index]->getType(),
-                   atoms[local_index]->getX(),
-                   atoms[local_index]->getY(),
-                   atoms[local_index]->getZ(),
-                   atoms[local_index]->get_vx(),
-                   atoms[local_index]->get_vy(),
-                   atoms[local_index]->get_vz()); // check here.
-          strcpy( writeLine, tempBuffer );
-
-          if( atoms[local_index]->isDirectional() ){
-
-            dAtom = (DirectionalAtom *)atoms[local_index];
-            dAtom->getQ( q );
-
-            sprintf( tempBuffer,
-                     "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
-                     q[0],
-                     q[1],
-                     q[2],
-                     q[3],
-                     dAtom->getJx(),
-                     dAtom->getJy(),
-                     dAtom->getJz());
-            strcat( writeLine, tempBuffer );
-          }
-          else
-            strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
-          std::cerr << "node: " << mpiSim->getMyNode() << " sending this line" << writeLine;
-          MPI::COMM_WORLD.Send(writeLine, BUFFERSIZE, MPI_CHAR, 0,
-                               TAKE_THIS_TAG);
-        } else {
-          strcpy( writeLine, "ATOM NOT FOUND ON THIS PROCESSOR");
-          MPI::COMM_WORLD.Send(writeLine, BUFFERSIZE, MPI_CHAR, 0,
-                               TAKE_THIS_TAG);
-        }
-      }
-    }
-  }
-  outFile.flush();
-  sprintf( checkPointMsg,
-           "Sucessfully took a dump.\n");
-  MPIcheckPoint();
-#endif // is_mpi
+#ifdef IS_MPI
+  finalOut.close();
+#endif
+
 }
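Aside (not part of the patch): writeDump() and writeFinal() now share one formatting path by handing writeFrame() a vector<ofstream*> and letting it push every formatted line to each registered stream. A self-contained sketch of that fan-out pattern (the file names here are made up for illustration):

// sketch only: write identical, already-formatted output to several streams
#include <fstream>
#include <vector>

using namespace std;

int main(){
  ofstream a("frame.dump"), b("frame.eor");   // hypothetical output files

  vector<ofstream*> streams;
  streams.push_back(&a);
  streams.push_back(&b);

  // every registered stream receives the same line, formatted once
  for(unsigned int k = 0; k < streams.size(); k++)
    *streams[k] << "3\n0.0;\t10.0\t0.0\t0.0;\t0.0\t10.0\t0.0;\t0.0\t0.0\t10.0;\n";

  return 0;
}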

-void DumpWriter::writeFinal(){
+void DumpWriter::writeFinal(double currentTime){

-  char finalName[500];
   ofstream finalOut;
+  vector<ofstream*> fileStreams;

-  const int BUFFERSIZE = 2000;
-  char tempBuffer[BUFFERSIZE];
-  char writeLine[BUFFERSIZE];
-
-  double q[4];
-  DirectionalAtom* dAtom;
-  int nAtoms = entry_plug->n_atoms;
-  Atom** atoms = entry_plug->atoms;
-  int i, j, which_node, done, game_over, which_atom, local_index;
-
-
 #ifdef IS_MPI
   if(worldRank == 0 ){
 #endif // is_mpi
-
-    strcpy( finalName, entry_plug->finalName );
-
-    finalOut.open( finalName, ios::out | ios::trunc );
+
+    finalOut.open( entry_plug->finalName, ios::out | ios::trunc );
+
     if( !finalOut ){
       sprintf( painCave.errMsg,
                "Could not open \"%s\" for final dump output.\n",
-               finalName );
+               entry_plug->finalName );
       painCave.isFatal = 1;
       simError();
     }
-
-    // finalOut.setf( ios::scientific );
-
+
 #ifdef IS_MPI
   }
+#endif // is_mpi

-  sprintf(checkPointMsg,"Opened file for final configuration\n");
-  MPIcheckPoint();
+  fileStreams.push_back(&finalOut);
+  writeFrame(fileStreams, currentTime);
+
+#ifdef IS_MPI
+  finalOut.close();
+#endif

-#endif //is_mpi
+}

+void DumpWriter::writeFrame( vector<ofstream*>& outFile, double currentTime ){
+
+  const int BUFFERSIZE = 2000;
+  const int MINIBUFFERSIZE = 100;
+
+  char tempBuffer[BUFFERSIZE];
+  char writeLine[BUFFERSIZE];
+
+  int i, k;
+
+#ifdef IS_MPI

+  /*********************************************************************
+   * Documentation? You want DOCUMENTATION?
+   *
+   * Why all the potatoes below?
+   *
+   * To make a long story short, the original version of DumpWriter
+   * worked in the most inefficient way possible. Node 0 would
+   * poke each of the node for an individual atom's formatted data
+   * as node 0 worked its way down the global index. This was particularly
+   * inefficient since the method blocked all processors at every atom
+   * (and did it twice!).
+   *
+   * An intermediate version of DumpWriter could be described from Node
+   * zero's perspective as follows:
+   *
+   *  1) Have 100 of your friends stand in a circle.
+   *  2) When you say go, have all of them start tossing potatoes at
+   *     you (one at a time).
+   *  3) Catch the potatoes.
+   *
+   * It was an improvement, but MPI has buffers and caches that could
+   * best be described in this analogy as "potato nets", so there's no
+   * need to block the processors atom-by-atom.
+   *
+   * This new and improved DumpWriter works in an even more efficient
+   * way:
+   *
+   *  1) Have 100 of your friend stand in a circle.
+   *  2) When you say go, have them start tossing 5-pound bags of
+   *     potatoes at you.
+   *  3) Once you've caught a friend's bag of potatoes,
+   *     toss them a spud to let them know they can toss another bag.
+   *
+   * How's THAT for documentation?
+   *
+   *********************************************************************/
+
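Aside (not part of the patch): the comment block above describes the tag ("potato") protocol informally. The sketch below shows just the bare mechanism with invented payloads: each sender keeps a per-destination tag counter, bumps it after every message, and the receiver bumps its matching counter, so messages pair up by tag without the per-atom request/reply of the old TAKE_THIS_TAG scheme. Run with two ranks, e.g. mpirun -np 2 ./potato_sketch:

// sketch only: the incrementing-tag ("potato") pairing between two ranks
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv){
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  if (rank == 1) {
    int myPotato = 0;                      // this rank's tag counter
    double payload[3] = {1.0, 2.0, 3.0};
    MPI_Send(payload, 3, MPI_DOUBLE, 0, myPotato, MPI_COMM_WORLD);
    myPotato++;                            // the next message uses the next tag
  } else if (rank == 0) {
    int potatoes[2] = {0, 0};              // one tag counter per sender
    double payload[3];
    MPI_Status istatus;
    MPI_Recv(payload, 3, MPI_DOUBLE, 1, potatoes[1], MPI_COMM_WORLD, &istatus);
    potatoes[1]++;                         // keep the counters in lockstep
    printf("rank 0 received %g %g %g\n", payload[0], payload[1], payload[2]);
  }

  MPI_Finalize();
  return 0;
}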
+  int *potatoes;
+  int myPotato;
+
+  int nProc;
+  int j, which_node, done, which_atom, local_index, currentIndex;
+  double atomData6[6];
+  double atomData13[13];
+  int isDirectional;
+  char* atomTypeString;
+  char MPIatomTypeString[MINIBUFFERSIZE];
+
+#else //is_mpi
+  int nAtoms = entry_plug->n_atoms;
+#endif //is_mpi
+
+  double q[4];
+  DirectionalAtom* dAtom;
+  Atom** atoms = entry_plug->atoms;
+  double pos[3], vel[3];
+
 #ifndef IS_MPI
-
-  finalOut << nAtoms << "\n";
-
-  finalOut << entry_plug->box_x << "\t"
-           << entry_plug->box_y << "\t"
-           << entry_plug->box_z << "\n";

+  for(k = 0; k < outFile.size(); k++){
+    *outFile[k] << nAtoms << "\n";
+
+    *outFile[k] << currentTime << ";\t"
+                << entry_plug->Hmat[0][0] << "\t"
+                << entry_plug->Hmat[1][0] << "\t"
+                << entry_plug->Hmat[2][0] << ";\t"
+
+                << entry_plug->Hmat[0][1] << "\t"
+                << entry_plug->Hmat[1][1] << "\t"
+                << entry_plug->Hmat[2][1] << ";\t"
+
+                << entry_plug->Hmat[0][2] << "\t"
+                << entry_plug->Hmat[1][2] << "\t"
+                << entry_plug->Hmat[2][2] << ";";
+
+    //write out additional parameters, such as chi and eta
+    *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl;
+  }
+
   for( i=0; i<nAtoms; i++ ){
-
+
+    atoms[i]->getPos(pos);
+    atoms[i]->getVel(vel);
+
     sprintf( tempBuffer,
              "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
              atoms[i]->getType(),
-             atoms[i]->getX(),
-             atoms[i]->getY(),
-             atoms[i]->getZ(),
-             atoms[i]->get_vx(),
-             atoms[i]->get_vy(),
-             atoms[i]->get_vz());
+             pos[0],
+             pos[1],
+             pos[2],
+             vel[0],
+             vel[1],
+             vel[2]);
     strcpy( writeLine, tempBuffer );

     if( atoms[i]->isDirectional() ){
-
+
       dAtom = (DirectionalAtom *)atoms[i];
       dAtom->getQ( q );
-
+
       sprintf( tempBuffer,
                "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
                q[0],
...
     }
     else
       strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
-
-    finalOut << writeLine;
+
+    for(k = 0; k < outFile.size(); k++)
+      *outFile[k] << writeLine;
   }
-  finalOut.flush();
-  finalOut.close();

 #else // is_mpi
+
+  /* code to find maximum tag value */

-  MPI::Status istatus;
+  int *tagub, flag, MAXTAG;
+  MPI_Attr_get(MPI_COMM_WORLD, MPI_TAG_UB, &tagub, &flag);
+  if (flag) {
+    MAXTAG = *tagub;
+  } else {
+    MAXTAG = 32767;
+  }
+
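Aside (not part of the patch): the hunk above caps the potato tags by querying MPI_TAG_UB; the MPI standard only guarantees tags up to 32767, which is why the code falls back to that value when the attribute is not set. A minimal standalone version of that query:

// sketch only: query the largest legal message tag on this MPI installation
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv){
  MPI_Init(&argc, &argv);

  int *tagub, flag, maxTag;
  MPI_Attr_get(MPI_COMM_WORLD, MPI_TAG_UB, &tagub, &flag);
  maxTag = flag ? *tagub : 32767;   // 32767 is the guaranteed minimum

  printf("largest usable MPI tag here: %d\n", maxTag);

  MPI_Finalize();
  return 0;
}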
+  int haveError;
+
+  MPI_Status istatus;
   int *AtomToProcMap = mpiSim->getAtomToProcMap();

   // write out header and node 0's coordinates
-
+
   if( worldRank == 0 ){
-    finalOut << mpiSim->getTotAtoms() << "\n";
+
+    // Node 0 needs a list of the magic potatoes for each processor;
+
+    nProc = mpiSim->getNumberProcessors();
+    potatoes = new int[nProc];
+
+    //write out the comment lines
+    for (i = 0; i < nProc; i++)
+      potatoes[i] = 0;

-    finalOut << entry_plug->box_x << "\t"
-             << entry_plug->box_y << "\t"
-             << entry_plug->box_z << "\n";
-
+    for(k = 0; k < outFile.size(); k++){
+      *outFile[k] << mpiSim->getTotAtoms() << "\n";
+
+      *outFile[k] << currentTime << ";\t"
+                  << entry_plug->Hmat[0][0] << "\t"
+                  << entry_plug->Hmat[1][0] << "\t"
+                  << entry_plug->Hmat[2][0] << ";\t"
+
+                  << entry_plug->Hmat[0][1] << "\t"
+                  << entry_plug->Hmat[1][1] << "\t"
+                  << entry_plug->Hmat[2][1] << ";\t"
+
+                  << entry_plug->Hmat[0][2] << "\t"
+                  << entry_plug->Hmat[1][2] << "\t"
+                  << entry_plug->Hmat[2][2] << ";";
+
+      *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl;
+    }
+
+    currentIndex = 0;
+
     for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {
-      // Get the Node number which has this molecule:

-      which_node = AtomToProcMap[i];
+      // Get the Node number which has this atom;

-      if (which_node == mpiSim->getMyNode()) {
+      which_node = AtomToProcMap[i];
+
+      if (which_node != 0) {
+
+        if (potatoes[which_node] + 3 >= MAXTAG) {
+          // The potato was going to exceed the maximum value,
+          // so wrap this processor potato back to 0:
+
+          potatoes[which_node] = 0;
+          MPI_Send(0, 1, MPI_INT, which_node, 0, MPI_COMM_WORLD);
+
+        }
+
+        myPotato = potatoes[which_node];

-        sprintf( tempBuffer,
-                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
-                 atoms[i]->getType(),
-                 atoms[i]->getX(),
-                 atoms[i]->getY(),
-                 atoms[i]->getZ(),
-                 atoms[i]->get_vx(),
-                 atoms[i]->get_vy(),
-                 atoms[i]->get_vz());
-        strcpy( writeLine, tempBuffer );
+        MPI_Recv(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, which_node,
+                 myPotato, MPI_COMM_WORLD, &istatus);
+
+        atomTypeString = MPIatomTypeString;

-        if( atoms[i]->isDirectional() ){
+        myPotato++;
+
+        MPI_Recv(&isDirectional, 1, MPI_INT, which_node,
+                 myPotato, MPI_COMM_WORLD, &istatus);
+
+        myPotato++;
+
+        if (isDirectional) {
+          MPI_Recv(atomData13, 13, MPI_DOUBLE, which_node,
+                   myPotato, MPI_COMM_WORLD, &istatus);
+        } else {
+          MPI_Recv(atomData6, 6, MPI_DOUBLE, which_node,
+                   myPotato, MPI_COMM_WORLD, &istatus);
+        }
+
+        myPotato++;
+        potatoes[which_node] = myPotato;
+
+      } else {
+
+        haveError = 0;
+        which_atom = i;
+
+        //local_index = -1;
+
+        //for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
+        //  if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
+        //}
+
+        //if (local_index != -1) {

-          dAtom = (DirectionalAtom *)atoms[i];
-          dAtom->getQ( q );
+        local_index = indexArray[currentIndex].first;
+
+        if (which_atom == indexArray[currentIndex].second) {
+
+          atomTypeString = atoms[local_index]->getType();
+
+          atoms[local_index]->getPos(pos);
+          atoms[local_index]->getVel(vel);

-          sprintf( tempBuffer,
-                   "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
-                   q[0],
-                   q[1],
-                   q[2],
-                   q[3],
-                   dAtom->getJx(),
-                   dAtom->getJy(),
-                   dAtom->getJz());
-          strcat( writeLine, tempBuffer );
-        }
-        else
-          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
+          atomData6[0] = pos[0];
+          atomData6[1] = pos[1];
+          atomData6[2] = pos[2];
+
+          atomData6[3] = vel[0];
+          atomData6[4] = vel[1];
+          atomData6[5] = vel[2];
+
+          isDirectional = 0;
+
+          if( atoms[local_index]->isDirectional() ){
+
+            isDirectional = 1;
+
+            dAtom = (DirectionalAtom *)atoms[local_index];
+            dAtom->getQ( q );
+
+            for (int j = 0; j < 6 ; j++)
+              atomData13[j] = atomData6[j];
+
+            atomData13[6] = q[0];
+            atomData13[7] = q[1];
+            atomData13[8] = q[2];
+            atomData13[9] = q[3];
+
+            atomData13[10] = dAtom->getJx();
+            atomData13[11] = dAtom->getJy();
+            atomData13[12] = dAtom->getJz();
+          }
+
+        } else {
+          sprintf(painCave.errMsg,
+                  "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n",
+                  which_atom, worldRank, currentIndex, local_index );
+          haveError= 1;
+          simError();
+        }

+        if(haveError) DieDieDie();
+
+        currentIndex++;
+      }
+      // If we've survived to here, format the line:
+
+      if (!isDirectional) {
+
+        sprintf( writeLine,
+                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
+                 atomTypeString,
+                 atomData6[0],
+                 atomData6[1],
+                 atomData6[2],
+                 atomData6[3],
+                 atomData6[4],
+                 atomData6[5]);
+
+        strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
+
       } else {
+
+        sprintf( writeLine,
+                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
+                 atomTypeString,
+                 atomData13[0],
+                 atomData13[1],
+                 atomData13[2],
+                 atomData13[3],
+                 atomData13[4],
+                 atomData13[5],
+                 atomData13[6],
+                 atomData13[7],
+                 atomData13[8],
+                 atomData13[9],
+                 atomData13[10],
+                 atomData13[11],
+                 atomData13[12]);

-        MPI::COMM_WORLD.Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG);
-        MPI::COMM_WORLD.Recv(writeLine, BUFFERSIZE, MPI_CHAR, which_node,
-                             TAKE_THIS_TAG, istatus);
       }

-      finalOut << writeLine;
+      for(k = 0; k < outFile.size(); k++)
+        *outFile[k] << writeLine;
     }

-    // kill everyone off:
-    game_over = -1;
-    for (j = 0; j < mpiSim->getNumberProcessors(); j++) {
-      MPI::COMM_WORLD.Send(&game_over, 1, MPI_INT, j, TAKE_THIS_TAG);
-    }
-
-  } else {
+    for(k = 0; k < outFile.size(); k++)
+      outFile[k]->flush();

-    done = 0;
-    while (!done) {
-      MPI::COMM_WORLD.Recv(&which_atom, 1, MPI_INT, 0,
-                           TAKE_THIS_TAG, istatus);
+    sprintf( checkPointMsg,
+             "Sucessfully took a dump.\n");
+
+    MPIcheckPoint();
+
+    delete[] potatoes;
+
+  } else {

-      if (which_atom == -1) {
-        done=1;
-        continue;
-      } else {
+    // worldRank != 0, so I'm a remote node.

-        local_index=-1;
-        for (j=0; j < mpiSim->getMyNlocal(); j++) {
-          if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
+    // Set my magic potato to 0:
+
+    myPotato = 0;
+    currentIndex = 0;
+
+    for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {
+
+      // Am I the node which has this atom?
+
+      if (AtomToProcMap[i] == worldRank) {
+
+        if (myPotato + 3 >= MAXTAG) {
+
+          // The potato was going to exceed the maximum value,
+          // so wrap this processor potato back to 0 (and block until
+          // node 0 says we can go:
+
+          MPI_Recv(&myPotato, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &istatus);
+
         }
-        if (local_index != -1) {
+        which_atom = i;

-          //format the line
-          sprintf( tempBuffer,
-                   "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
-                   atoms[local_index]->getType(),
-                   atoms[local_index]->getX(),
-                   atoms[local_index]->getY(),
-                   atoms[local_index]->getZ(),
-                   atoms[local_index]->get_vx(),
-                   atoms[local_index]->get_vy(),
-                   atoms[local_index]->get_vz()); // check here.
-          strcpy( writeLine, tempBuffer );
+        //local_index = -1;
+
+        //for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
+        //  if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
+        //}
+
+        //if (local_index != -1) {
+
+        local_index = indexArray[currentIndex].first;
+
+        if (which_atom == indexArray[currentIndex].second) {
+
+          atomTypeString = atoms[local_index]->getType();

+          atoms[local_index]->getPos(pos);
+          atoms[local_index]->getVel(vel);
+
+          atomData6[0] = pos[0];
+          atomData6[1] = pos[1];
+          atomData6[2] = pos[2];
+
+          atomData6[3] = vel[0];
+          atomData6[4] = vel[1];
+          atomData6[5] = vel[2];
+
+          isDirectional = 0;
+
           if( atoms[local_index]->isDirectional() ){
+
+            isDirectional = 1;

             dAtom = (DirectionalAtom *)atoms[local_index];
             dAtom->getQ( q );
-
-            sprintf( tempBuffer,
-                     "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
-                     q[0],
-                     q[1],
-                     q[2],
-                     q[3],
-                     dAtom->getJx(),
-                     dAtom->getJy(),
-                     dAtom->getJz());
-            strcat( writeLine, tempBuffer );
+
+            for (int j = 0; j < 6 ; j++)
+              atomData13[j] = atomData6[j];
+
+            atomData13[6] = q[0];
+            atomData13[7] = q[1];
+            atomData13[8] = q[2];
+            atomData13[9] = q[3];
+
+            atomData13[10] = dAtom->getJx();
+            atomData13[11] = dAtom->getJy();
+            atomData13[12] = dAtom->getJz();
           }
-          else
-            strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
+
+        } else {
+          sprintf(painCave.errMsg,
+                  "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n",
+                  which_atom, worldRank, currentIndex, local_index );
+          haveError= 1;
+          simError();
+        }
+
+        strncpy(MPIatomTypeString, atomTypeString, MINIBUFFERSIZE);
+
+        // null terminate the string before sending (just in case):
+        MPIatomTypeString[MINIBUFFERSIZE-1] = '\0';
+
+        MPI_Send(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, 0,
+                 myPotato, MPI_COMM_WORLD);
+
+        myPotato++;
+
+        MPI_Send(&isDirectional, 1, MPI_INT, 0,
+                 myPotato, MPI_COMM_WORLD);
+
+        myPotato++;
+
+        if (isDirectional) {
+
+          MPI_Send(atomData13, 13, MPI_DOUBLE, 0,
+                   myPotato, MPI_COMM_WORLD);

-          MPI::COMM_WORLD.Send(writeLine, BUFFERSIZE, MPI_CHAR, 0,
-                               TAKE_THIS_TAG);
         } else {
-          strcpy( writeLine, "ATOM NOT FOUND ON THIS PROCESSOR");
-          MPI::COMM_WORLD.Send(writeLine, BUFFERSIZE, MPI_CHAR, 0,
-                               TAKE_THIS_TAG);
+
+          MPI_Send(atomData6, 6, MPI_DOUBLE, 0,
+                   myPotato, MPI_COMM_WORLD);
         }
+
+        myPotato++;
+        currentIndex++;
       }
     }
-  }
-  finalOut.flush();
-  sprintf( checkPointMsg,
-           "Sucessfully took a dump.\n");
-  MPIcheckPoint();

-  if( worldRank == 0 ) finalOut.close();
+    sprintf( checkPointMsg,
+             "Sucessfully took a dump.\n");
+    MPIcheckPoint();
+
+  }
+
 #endif // is_mpi
 }
+
+#ifdef IS_MPI
+
+// a couple of functions to let us escape the write loop
+
+void dWrite::DieDieDie( void ){
+
+  MPI_Finalize();
+  exit (0);
+}
+
+#endif //is_mpi