#include <algorithm>
#include <cstdio>   // sprintf
#include <iostream>
#include <vector>
//#include <pair>

#include "io/ZConsWriter.hpp"
#include "utils/simError.h"

using namespace std;
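
// ZConsWriter writes z-constraint data to a single output file; when compiled
// with MPI, only the master node (worldRank 0) performs the file I/O.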

ZConsWriter::ZConsWriter(const char* filename, vector<ZConsParaItem>* thePara)
{
  // master-slave mode: only the master node writes to disk
#ifdef IS_MPI
  if(worldRank == 0){
#endif

    output.open(filename);

    if(!output){
      sprintf( painCave.errMsg,
               "Could not open %s for z constraint output\n",
               filename);
      painCave.isFatal = 1;
      simError();
    }

    // header comments describing the layout of the output file
    output << "#number of z constrain molecules" << endl;
    output << "#global Index of molecule\tzPos" << endl;
    output << "#every frame will contain below data" << endl;
    output << "#time(fs)" << endl;
    output << "#number of fixed z-constrain molecules" << endl;
    output << "#global Index of molecule\tzconstrain force\tcurrentZPos" << endl;

    parameters = thePara;
    writeZPos();

#ifdef IS_MPI
  }
#endif
}


ZConsWriter::~ZConsWriter()
{
#ifdef IS_MPI
  if(worldRank == 0){
#endif

    output.close();

#ifdef IS_MPI
  }
#endif
}

/**
 * Write the z-constraint force, current z position, and target z position of
 * every fixed z-constrained molecule for the current frame.
 */
void ZConsWriter::writeFZ(double time, int num, int* index, double* fz, double* curZPos, double* zpos){

#ifndef IS_MPI
  output << time << endl;
  output << num << endl;

  for(int i = 0; i < num; i++)
    output << index[i] << "\t" << fz[i] << "\t" << curZPos[i] << "\t" << zpos[i] << endl;

#else
  // sum the per-node molecule counts to get the global number of constrained molecules
  int totalNum;
  MPI_Allreduce(&num, &totalNum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  if(worldRank == 0){
    output << time << endl;
    output << totalNum << endl;
  }

  int whichNode;
  enum CommType { RequesPosAndForce, EndOfRequest } status;
  double pos;
  double force;
  double zconsPos;
  int localIndex;
  MPI_Status ierr;
  int tag = 0;
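
  // Master node: walk the full list of z-constrained molecules. Molecules owned by
  // the master are looked up locally; molecules owned by other nodes are fetched by
  // sending a RequesPosAndForce request with the molecule's global index, then
  // receiving its force, current z position, and target z position.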
|
|
|
  if(worldRank == 0){

    int globalIndexOfCurMol;
    int *MolToProcMap;
    MolToProcMap = mpiSim->getMolToProcMap();

    for(int i = 0; i < (int)(parameters->size()); i++){

      globalIndexOfCurMol = (*parameters)[i].zconsIndex;
      whichNode = MolToProcMap[globalIndexOfCurMol];

      if(whichNode == 0){

        for(int j = 0; j < num; j++)
          if(index[j] == globalIndexOfCurMol){
            localIndex = j;
            break;
          }

        force = fz[localIndex];
        pos = curZPos[localIndex];
        zconsPos = zpos[localIndex];

      }
      else{
        status = RequesPosAndForce;
        MPI_Send(&status, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
        MPI_Send(&globalIndexOfCurMol, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
        MPI_Recv(&force, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &ierr);
        MPI_Recv(&pos, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &ierr);
        MPI_Recv(&zconsPos, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &ierr);
      }

      output << globalIndexOfCurMol << "\t" << force << "\t" << pos << "\t" << zconsPos << endl;

    } //End of Request Loop

    //Send ending request message to slave nodes
    status = EndOfRequest;
    for(int i = 1; i < mpiSim->getNProcessors(); i++)
      MPI_Send(&status, 1, MPI_INT, i, tag, MPI_COMM_WORLD);

  }
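  // Slave branch below: answer the master's requests until an EndOfRequest message arrives.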
|
|
  else{

    int whichMol;
    bool done = false;

    while (!done){

      MPI_Recv(&status, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &ierr);

      switch (status){

      case RequesPosAndForce :

        MPI_Recv(&whichMol, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &ierr);

        // locate the requested molecule in this node's index array
        for(int i = 0; i < num; i++)
          if(index[i] == whichMol){
            localIndex = i;
            break;
          }

        MPI_Send(&fz[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
        MPI_Send(&curZPos[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
        MPI_Send(&zpos[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
        break;

      case EndOfRequest :

        done = true;
        break;
      }

    }

  }

#endif

}

/*
 * Write the number of z-constrained molecules followed by the global index and
 * target z position of each one.
 */
void ZConsWriter::writeZPos(){

#ifdef IS_MPI
  if(worldRank == 0){
#endif

    output << parameters->size() << endl;

    for(int i = 0; i < (int)(parameters->size()); i++)
      output << (*parameters)[i].zconsIndex << "\t" << (*parameters)[i].zPos << endl;

#ifdef IS_MPI
  }
#endif
}