 18 |
 19 |   if(!output){
 20 |     sprintf( painCave.errMsg,
 21 <              "Could not open \"s\" for z constrain output \n",
 21 >              "Could not open %s for z constrain output \n",
 22 |              filename);
 23 |     painCave.isFatal = 1;
 24 |     simError();
 25 |   }
 26 |   output << "#number of z constrain molecules" << endl;
 27 |   output << "#global Index of molecule\tzPos" << endl;
 28 <   output << "every frame will contain below data" <<endl;
 28 >   output << "#every frame will contain below data" <<endl;
 29 |   output << "#time(fs)" << endl;
 30 |   output << "#number of fixed z-constrain molecules" << endl;
 31 |   output << "#global Index of molecule\tzconstrain force\tcurrentZPos" << endl;
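
The change at line 21 fixes a format-string bug: the old message hard-coded a quoted letter s, so the filename argument passed on line 22 was never substituted into the error text. A small standalone sketch of the difference, using a local buffer and std::snprintf in place of painCave.errMsg and sprintf (the file name value is only a placeholder):

#include <cstdio>

int main() {
  const char* filename = "zcons.out";   // placeholder file name
  char msg[256];

  // Old format string: \"s\" prints a literal s; the filename argument is ignored.
  std::snprintf(msg, sizeof(msg), "Could not open \"s\" for z constrain output \n", filename);
  std::printf("%s", msg);   // -> Could not open "s" for z constrain output

  // Fixed format string: %s substitutes the filename argument.
  std::snprintf(msg, sizeof(msg), "Could not open %s for z constrain output \n", filename);
  std::printf("%s", msg);   // -> Could not open zcons.out for z constrain output

  return 0;
}
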
 54 | /**
 55 |  *
 56 |  */
 57 < void ZConsWriter::writeFZ(double time, int num, int* index, double* fz, double* curZPos){
 57 > void ZConsWriter::writeFZ(double time, int num, int* index, double* fz, double* curZPos, double* zpos){
 58 |
 59 | #ifndef IS_MPI
 60 |   output << time << endl;
 61 |   output << num << endl;
 62 |
 63 |   for(int i = 0; i < num; i++)
 64 <     output << index[i] <<"\t" << fz[i] << "\t" << curZPos[i] << endl;
 64 >     output << index[i] <<"\t" << fz[i] << "\t" << curZPos[i] << "\t" << zpos[i] <<endl;
 65 |
 66 | #else
 67 |   int totalNum;
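
With the new zpos argument, the serial branch writes a fourth tab-separated column for every molecule. Below is a self-contained sketch of the frame layout after this change, with placeholder values, std::cout standing in for the output stream, and zpos assumed to hold the constraint target positions:

#include <iostream>

using namespace std;

int main() {
  // Placeholder frame data for two z-constrained molecules.
  double time = 1000.0;                 // time in fs
  int    num  = 2;                      // number of fixed z-constrain molecules
  int    index[]   = {3, 7};            // global molecule indices
  double fz[]      = {-1.2,  0.8};      // z-constraint forces
  double curZPos[] = {12.5, 30.1};      // current z positions
  double zpos[]    = {12.0, 30.0};      // assumed constraint target positions

  // Same layout the serial branch of writeFZ produces for one frame.
  cout << time << endl;
  cout << num << endl;
  for (int i = 0; i < num; i++)
    cout << index[i] << "\t" << fz[i] << "\t" << curZPos[i] << "\t" << zpos[i] << endl;

  return 0;
}
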
 76 |   enum CommType { RequesPosAndForce, EndOfRequest} status;
 77 |   double pos;
 78 |   double force;
 79 +   double zconsPos;
 80 |   int localIndex;
 81 |   MPI_Status ierr;
 82 |   int tag = 0;

 87 |     int *MolToProcMap;
 88 |     MolToProcMap = mpiSim->getMolToProcMap();
 89 |
 90 <     for(int i = 0; i < parameters->size(); i++){
 90 >     for(int i = 0; i < (int)(parameters->size()); i++){
 91 |
 92 |       globalIndexOfCurMol = (*parameters)[i].zconsIndex;
 93 |       whichNode = MolToProcMap[globalIndexOfCurMol];

108 |         status = RequesPosAndForce;
109 |         MPI_Send(&status, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
110 |         MPI_Send(&globalIndexOfCurMol, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
111 <         MPI_Recv(&force, 1, MPI_DOUBLE_PRECISION, whichNode, tag, MPI_COMM_WORLD, &ierr);
112 <         MPI_Recv(&pos, 1, MPI_DOUBLE_PRECISION, whichNode, tag, MPI_COMM_WORLD, &ierr);
111 >         MPI_Recv(&force, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &ierr);
112 >         MPI_Recv(&pos, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &ierr);
113 >         MPI_Recv(&zconsPos, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &ierr);
114 |       }
115 |
116 <       output << globalIndexOfCurMol << "\t" << force << "\t" << pos << endl;
116 >       output << globalIndexOfCurMol << "\t" << force << "\t" << pos << "\t"<< zconsPos << endl;
117 |
118 |     } //End of Request Loop
119 |
120 |     //Send ending request message to slave nodes
121 |     status = EndOfRequest;
122 <     for(int i =1; i < mpiSim->getNumberProcessors(); i++)
122 >     for(int i =1; i < mpiSim->getNProcessors(); i++)
123 |       MPI_Send(&status, 1, MPI_INT, i, tag, MPI_COMM_WORLD);
124 |
125 |   }

144 |           break;
145 |         }
146 |
147 <         MPI_Send(&fz[localIndex], 1, MPI_DOUBLE_PRECISION, 0, tag, MPI_COMM_WORLD);
148 <         MPI_Send(&curZPos[localIndex], 1, MPI_DOUBLE_PRECISION, 0, tag, MPI_COMM_WORLD);
147 >         MPI_Send(&fz[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
148 >         MPI_Send(&curZPos[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
149 >         MPI_Send(&zpos[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
150 |         break;
151 |
152 |       case EndOfRequest :
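
Taken together, the MPI hunks extend the master/worker exchange by one value: for every z-constrained molecule the master now receives the constraint force, the current z position, and the constraint position, and the datatype handle changes from MPI_DOUBLE_PRECISION (the Fortran handle) to MPI_DOUBLE, the matching handle for a C/C++ double. The following is a self-contained sketch of that request/reply pattern under simplifying assumptions: one hypothetical molecule per worker rank, placeholder values, and MPI_STATUS_IGNORE instead of the ierr status object, whereas the real code looks up each molecule's owner in MolToProcMap.

#include <mpi.h>
#include <cstdio>

// Mirrors the CommType enum from the diff (first value spelled as in the source).
enum CommType { RequesPosAndForce, EndOfRequest };

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int rank, nprocs;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  const int tag = 0;

  if (rank == 0) {
    // Master: ask every worker for force, current z, and constraint z,
    // then send the end-of-request message to all workers.
    for (int node = 1; node < nprocs; node++) {
      int status = RequesPosAndForce;
      int globalIndex = node;   // stand-in for the molecule's global index
      double force, pos, zconsPos;

      MPI_Send(&status, 1, MPI_INT, node, tag, MPI_COMM_WORLD);
      MPI_Send(&globalIndex, 1, MPI_INT, node, tag, MPI_COMM_WORLD);
      MPI_Recv(&force, 1, MPI_DOUBLE, node, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      MPI_Recv(&pos, 1, MPI_DOUBLE, node, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      MPI_Recv(&zconsPos, 1, MPI_DOUBLE, node, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);

      std::printf("%d\t%g\t%g\t%g\n", globalIndex, force, pos, zconsPos);
    }

    int status = EndOfRequest;
    for (int node = 1; node < nprocs; node++)
      MPI_Send(&status, 1, MPI_INT, node, tag, MPI_COMM_WORLD);
  } else {
    // Worker: answer requests until the master signals the end.
    while (true) {
      int status, globalIndex;
      MPI_Recv(&status, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      if (status == EndOfRequest) break;

      MPI_Recv(&globalIndex, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      double force = 0.0, pos = 0.0, zconsPos = 0.0;   // placeholder data
      MPI_Send(&force, 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
      MPI_Send(&pos, 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
      MPI_Send(&zconsPos, 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
    }
  }

  MPI_Finalize();
  return 0;
}
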
174 |
175 |   output << parameters->size() << endl;
176 |
177 <   for(int i =0 ; i < parameters->size(); i++)
177 >   for(int i =0 ; i < (int)(parameters->size()); i++)
178 |     output << (*parameters)[i].zconsIndex << "\t" << (*parameters)[i].zPos << endl;
179 |
180 | #ifdef IS_MPI
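
The casts added at lines 90 and 177 resolve the signed/unsigned comparison between the int loop counter and the unsigned value returned by size(). A minimal illustration of that warning and the fix, with std::vector standing in for whatever container parameters actually points to:

#include <cstdio>
#include <vector>

int main() {
  std::vector<double> parameters(3, 0.0);   // stand-in for the real parameter list

  // Comparing a signed int against the unsigned size_t returned by size()
  // draws a -Wsign-compare warning; the explicit cast silences it.
  for (int i = 0; i < (int)(parameters.size()); i++)
    std::printf("%d\n", i);

  return 0;
}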