  7
  8       using namespace std;
  9
 10 <     ZConsWriter::ZConsWriter(const char* filename)
 10 >     ZConsWriter::ZConsWriter(const char* filename, vector<ZConsParaItem>* thePara)
 11       {
 12         //use master - slave mode, only master node writes to disk
 13       #ifdef IS_MPI
 18
 19         if(!output){
 20           sprintf( painCave.errMsg,
 21 <                  "Could not open \"s\" for z constrain output \n",
 22 <                  filename);
 21 >                  "Could not open %s for z constrain output \n",
 22 >                  filename);
 23           painCave.isFatal = 1;
 24           simError();
 25         }
 26         output << "#number of z constrain molecules" << endl;
 27 <       output << "#global Index of molecule\trefZ" << endl;
 28 <
 27 >       output << "#global Index of molecule\tzPos" << endl;
 28 >       output << "#every frame will contain below data" <<endl;
 29         output << "#time(fs)" << endl;
 30 <       output << "#number of z constrain molecules" << endl;
 31 <       output << "#global Index of molecule\tzconstrain force" << endl;
 30 >       output << "#number of fixed z-constrain molecules" << endl;
 31 >       output << "#global Index of molecule\tzconstrain force\tcurrentZPos" << endl;
 32 >
 33 >       parameters = thePara;
 34 >       writeZPos();
 35 >
 36       #ifdef IS_MPI
 37         }
 38       #endif
 51       #endif
 52       }
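
Note: the new constructor takes the z-constraint parameter list and immediately records the
reference positions via writeZPos(). The diff only shows that each ZConsParaItem exposes a
global molecule index (zconsIndex) and a constrained z position (zPos); a minimal sketch of
such an item, assuming the real definition lives elsewhere in the tree, would be:

    // Hypothetical sketch -- the actual ZConsParaItem is defined outside this diff;
    // only the two members dereferenced later (zconsIndex, zPos) are shown.
    struct ZConsParaItem {
      int    zconsIndex;   // global index of the z-constrained molecule
      double zPos;         // target z position reported by writeZPos()
    };

    // Illustrative construction (the filename here is a placeholder):
    //   std::vector<ZConsParaItem>* para = /* filled by the z-constraint setup code */;
    //   ZConsWriter writer("zcons.out", para);
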
 53
 54 <     void ZConsWriter::writeFZ(double time, int num, int* index, double* fz)
 55 <     {
 54 >     /**
 55 >      *
 56 >      */
 57 >     void ZConsWriter::writeFZ(double time, int num, int* index, double* fz, double* curZPos, double* zpos){
 58
 59       #ifndef IS_MPI
 54 <       vector<pair<int, double> > data; // The space between two ">" is needed. Otherwise, compileer
 55 <                                        // will take it as redirect symbol ">>"
 56 <
 57 <       for(int i = 0; i < num ; i++)
 58 <         data.push_back(pair<int, double>(index[i], fz[i]));
 59 <
 60         output << time << endl;
 61         output << num << endl;
 62
 63 <       //sort data by index
 64 <       sort(data.begin(), data.end());
 65 <
 66 <       for(int i=0; i < data.size(); i++)
 67 <         output << data[i].first << "\t" << data[i].second << endl;
 68 <
 69 <     #else
 63 >       for(int i = 0; i < num; i++)
 64 >         output << index[i] <<"\t" << fz[i] << "\t" << curZPos[i] << "\t" << zpos[i] <<endl;
 65
 66 <       //master node will be responsible for receiving, assembling and writing data
 67 <       if(worldRank == 0)
 68 <       {
 66 >     #else
 67 >       int totalNum;
 68 >       MPI_Allreduce(&num, &totalNum, 1, MPI_INT,MPI_SUM, MPI_COMM_WORLD);
 69
 70 <         vector<pair<int,double> > data;
 71 <         int numProcessors;
 72 <         int recvCount;
 73 <         int* indexBuf;
 74 <         double* fzBuf;
 75 <         MPI_Status istatus;
 76 <
 77 <         //process the data in master
 78 <         for(int i=0; i < num; i++){
 79 <           data.push_back(pair<int, double>(index[i], fz[i]));
 80 <         }
 70 >       if(worldRank == 0){
 71 >         output << time << endl;
 72 >         output << totalNum << endl;
 73 >       }
 74 >
 75 >       int whichNode;
 76 >       enum CommType { RequesPosAndForce, EndOfRequest} status;
 77 >       double pos;
 78 >       double force;
 79 >       double zconsPos;
 80 >       int localIndex;
 81 >       MPI_Status ierr;
 82 >       int tag = 0;
 83 >
 84 >       if(worldRank == 0){
 85
 86 <
 87 <         numProcessors = mpiSim->getNumberProcessors();
 86 >         int globalIndexOfCurMol;
 87 >         int *MolToProcMap;
 88 >         MolToProcMap = mpiSim->getMolToProcMap();
 89
 90 <         //acquire the data from other nodes;
 91 <         for(int whichNode = 1; whichNode < numProcessors; whichNode++){
 92 <
 90 >         for(int i = 0; i < (int)(parameters->size()); i++){
 91
 92 <           MPI_Recv(&recvCount, 1, MPI_INT, whichNode,
 93 <                    0, MPI_COMM_WORLD, &istatus);
 92 >           globalIndexOfCurMol = (*parameters)[i].zconsIndex;
 93 >           whichNode = MolToProcMap[globalIndexOfCurMol];
 94
 95 <           if(recvCount > 0){
 98 <
 99 <             indexBuf = new int[recvCount];
100 <             fzBuf = new double[recvCount];
 95 >           if(whichNode == 0){
 96
 97 <             if(!indexBuf || !fzBuf){
 98 <               sprintf(painCave.errMsg,
 99 <                       "Memory Allocation inside class ZConsWriter\n");
100 <               painCave.isFatal = 1;
101 <               simError();
107 <             }
108 <
109 <             MPI_Recv(indexBuf, recvCount, MPI_INT, whichNode,
110 <                      0, MPI_COMM_WORLD, &istatus);
 97 >             for(int j = 0; j < num; j++)
 98 >               if(index[j] == globalIndexOfCurMol){
 99 >                 localIndex = j;
100 >                 break;
101 >               }
102
103 <             MPI_Recv(fzBuf, recvCount, MPI_DOUBLE_PRECISION, whichNode,
104 <                      0, MPI_COMM_WORLD, &istatus);
105 <
115 <             //assemble the data
116 <             for(int i = 0; i < recvCount; i++){
117 <               data.push_back(pair<int, double>(indexBuf[i], fzBuf[i]));
118 <             }
119 <
120 <
121 <             delete[] indexBuf;
122 <             delete[] fzBuf;
123 <
103 >             force = fz[localIndex];
104 >             pos = curZPos[localIndex];
105 >
106           }
107 <
108 <         }
107 >           else{
108 >             status = RequesPosAndForce;
109 >             MPI_Send(&status, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
110 >             MPI_Send(&globalIndexOfCurMol, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
111 >             MPI_Recv(&force, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &ierr);
112 >             MPI_Recv(&pos, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &ierr);
113 >             MPI_Recv(&zconsPos, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &ierr);
114 >           }
115 >
116 >           output << globalIndexOfCurMol << "\t" << force << "\t" << pos << "\t"<< zconsPos << endl;
117 >
118 >         } //End of Request Loop
119
120 <         // sort the data by index
121 <         sort(data.begin(), data.end());
120 >         //Send ending request message to slave nodes
121 >         status = EndOfRequest;
122 >         for(int i =1; i < mpiSim->getNprocessors(); i++)
123 >           MPI_Send(&status, 1, MPI_INT, i, tag, MPI_COMM_WORLD);
124 >
125 >       }
126 >       else{
127 >
128 >         int whichMol;
129 >         bool done = false;
130 >
131 >         while (!done){
132 >
133 >           MPI_Recv(&status, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &ierr);
134
135 <         output << time << endl;
136 <         output << data.size() << endl;
135 >           switch (status){
136 >
137 >             case RequesPosAndForce :
138 >
139 >               MPI_Recv(&whichMol, 1, MPI_INT, 0, tag, MPI_COMM_WORLD,&ierr);
140
141 <         for(int i = 0; i < data.size(); i++){
141 >               for(int i = 0; i < num; i++)
142 >                 if(index[i] == whichMol){
143 >                   localIndex = i;
144 >                   break;
145 >                 }
146
147 <           output << data[i].first << "\t" << data[i].second << endl;
147 >               MPI_Send(&fz[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
148 >               MPI_Send(&curZPos[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
149 >               MPI_Send(&zpos[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
150 >               break;
151 >
152 >             case EndOfRequest :
153 >
154 >               done = true;
155 >               break;
156 >           }
157 >
158           }
159 <
139 <       }
140 <       else
141 <       {
142 <         MPI_Send(&num, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
143 <
144 <         if(num > 0){
145 <           MPI_Send(index, num, MPI_INT, 0, 0, MPI_COMM_WORLD);
146 <           MPI_Send(fz, num, MPI_DOUBLE_PRECISION, 0, 0, MPI_COMM_WORLD);
147 <         }
159 >
160         }
161
162       #endif
163
164       }
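
Note: the rewritten MPI branch drops the old "every slave ships its whole fz array to the
master" gather and replaces it with a request/reply loop: the master walks the z-constraint
parameter list, looks up each molecule's owning processor through MolToProcMap, sends a
RequesPosAndForce message followed by the molecule's global index, receives the force and
positions back, writes one line per molecule, and finally sends EndOfRequest so every slave
leaves its service loop. A minimal standalone sketch of that master/slave pattern (the enum
values, the single double payload, and the printf are illustrative placeholders, not the
OOPSE code itself):

    #include <mpi.h>
    #include <cstdio>

    enum CommType { RequestData, EndOfRequest };

    int main(int argc, char** argv){
      MPI_Init(&argc, &argv);
      int rank, nproc;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &nproc);
      const int tag = 0;

      if(rank == 0){
        //master: request one value from every slave, then tell them all to stop
        for(int node = 1; node < nproc; node++){
          int status = RequestData;
          double value;
          MPI_Send(&status, 1, MPI_INT, node, tag, MPI_COMM_WORLD);
          MPI_Recv(&value, 1, MPI_DOUBLE, node, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          std::printf("node %d sent %f\n", node, value);
        }
        int status = EndOfRequest;
        for(int node = 1; node < nproc; node++)
          MPI_Send(&status, 1, MPI_INT, node, tag, MPI_COMM_WORLD);
      }
      else{
        //slave: service requests until the master signals the end
        bool done = false;
        while(!done){
          int status;
          MPI_Recv(&status, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          if(status == RequestData){
            double value = 1.0 * rank;   //stand-in for the fz/curZPos lookups above
            MPI_Send(&value, 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
          }
          else if(status == EndOfRequest){
            done = true;
          }
        }
      }

      MPI_Finalize();
      return 0;
    }
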
165
166 <     void ZConsWriter::writeRefZ(const vector<int>& index, const vector<double>& refZ){
166 >     /*
167 >      *
168 >      */
169 >     void ZConsWriter::writeZPos(){
170
171       #ifdef IS_MPI
172         if(worldRank == 0){
173       #endif
174
175 <       output << index.size() << endl;
175 >       output << parameters->size() << endl;
176
177 <       for(int i =0 ; i < index.size(); i++)
178 <         output << index[i] << "\t" << refZ[i] << endl;
177 >       for(int i =0 ; i < (int)(parameters->size()); i++)
178 >         output << (*parameters)[i].zconsIndex << "\t" << (*parameters)[i].zPos << endl;
179
180       #ifdef IS_MPI
181         }