  6     * redistribute this software in source and binary code form, provided
  7     * that the following conditions are met:
  8     *
  9  <  * 1. Acknowledgement of the program authors must be made in any
 10  <  * publication of scientific results based in part on use of the
 11  <  * program. An acceptable form of acknowledgement is citation of
 12  <  * the article in which the program was described (Matthew
 13  <  * A. Meineke, Charles F. Vardeman II, Teng Lin, Christopher
 14  <  * J. Fennell and J. Daniel Gezelter, "OOPSE: An Object-Oriented
 15  <  * Parallel Simulation Engine for Molecular Dynamics,"
 16  <  * J. Comput. Chem. 26, pp. 252-271 (2005))
 17  <  *
 18  <  * 2. Redistributions of source code must retain the above copyright
  9  >  * 1. Redistributions of source code must retain the above copyright
 10     * notice, this list of conditions and the following disclaimer.
 11     *
 12  <  * 3. Redistributions in binary form must reproduce the above copyright
 12  >  * 2. Redistributions in binary form must reproduce the above copyright
 13     * notice, this list of conditions and the following disclaimer in the
 14     * documentation and/or other materials provided with the
 15     * distribution.
 28     * arising out of the use of or inability to use software, even if the
 29     * University of Notre Dame has been advised of the possibility of
 30     * such damages.
 31  +  *
 32  +  * SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
 33  +  * research, please cite the appropriate papers when you publish your
 34  +  * work. Good starting points are:
 35  +  *
 36  +  * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
 37  +  * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
 38  +  * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
 39  +  * [4] Vardeman & Gezelter, in progress (2009).
 40     */
 41
 42
 43  +  #include <string>
 44  +  #include <sstream>
 45     #include <iostream>
 46
 47     #include "io/RestWriter.hpp"
 51     #include <mpi.h>
 52     #endif
 53
 54  <  namespace oopse {
 54  >  namespace OpenMD {
 55       RestWriter::RestWriter(SimInfo* info, const std::string& filename,
 56                              std::vector<Restraint*> restraints ) :
 57         info_(info){
 58  <
 59  <      //use master - slave mode, only master node writes to disk
 58  >      createRestFile_ = true;
 59  >
 60     #ifdef IS_MPI
 61         if(worldRank == 0){
 62     #endif
 63  <
 64  <        output_.open(filename.c_str());
 65  <
 63  >
 64  >        output_ = new std::ofstream(filename.c_str());
 65  >
 66          if(!output_){
 67            sprintf( painCave.errMsg,
 68                     "Could not open %s for restraint output.\n",
 71            simError();
 72          }
 73
 74  <       output_ << "#time\t";
 75  <
 76  <       // TODO: get Restraint info from slave nodes:
 75  <       std::vector<Restraint*>::const_iterator resti;
 76  <       for(resti=restraints.begin(); resti != restraints.end(); ++resti){
 74  >  #ifdef IS_MPI
 75  >      }
 76  >  #endif // is_mpi
 77
 78  <         if ((*resti)->getPrintRestraint()) {
 79  <           std::string myName = (*resti)->getRestraintName();
 80  <           int myType = (*resti)->getRestraintType();
 78  >
 79  >  #ifdef IS_MPI
 80  >      MPI_Status istatus;
 81  >  #endif
 82  >
 83  >  #ifndef IS_MPI
 84  >
 85  >      (*output_) << "#time\t";
 86  >
 87  >      std::vector<Restraint*>::const_iterator resti;
 88  >
 89  >      for(resti=restraints.begin(); resti != restraints.end(); ++resti){
 90  >        if ((*resti)->getPrintRestraint()) {
 91  >
 92  >          std::string myName = (*resti)->getRestraintName();
 93  >          int myType = (*resti)->getRestraintType();
 94  >
 95  >          (*output_) << myName << ":";
 96  >
 97  >          if (myType & Restraint::rtDisplacement)
 98  >            (*output_) << "\tPosition(angstroms)\tEnergy(kcal/mol)";
 99  >
100  >          if (myType & Restraint::rtTwist)
101  >            (*output_) << "\tTwistAngle(radians)\tEnergy(kcal/mol)";
102  >
103  >          if (myType & Restraint::rtSwingX)
104  >            (*output_) << "\tSwingXAngle(radians)\tEnergy(kcal/mol)";
105
106  <          output_ << myName << ":";
107  <
 84  <          if (myType & Restraint::rtDisplacement)
 85  <            output_ << "\tPosition(angstroms)\tEnergy(kcal/mol)";
 86  <
 87  <          if (myType & Restraint::rtTwist)
 88  <            output_ << "\tTwistAngle(radians)\tEnergy(kcal/mol)";
 89  <
 90  <          if (myType & Restraint::rtSwingX)
 91  <            output_ << "\tSwingXAngle(radians)\tEnergy(kcal/mol)";
 92  <
 93  <          if (myType & Restraint::rtSwingY)
 94  <            output_ << "\tSwingYAngle(radians)\tEnergy(kcal/mol)";
 95  <
 96  <        }
106  >          if (myType & Restraint::rtSwingY)
107  >            (*output_) << "\tSwingYAngle(radians)\tEnergy(kcal/mol)";
108            }
 98  -      output_ << "\n";
 99  -  #ifdef IS_MPI
109          }
110  <  #endif
111  <    }
110  >
111  >      (*output_) << "\n";
112  >      (*output_).flush();
113  >
114  >  #else
115  >
116  >      std::string buffer;
117  >
118  >      std::vector<Restraint*>::const_iterator resti;
119  >
120  >      for(resti=restraints.begin(); resti != restraints.end(); ++resti){
121  >        if ((*resti)->getPrintRestraint()) {
122  >
123  >          std::string myName = (*resti)->getRestraintName();
124  >          int myType = (*resti)->getRestraintType();
125  >
126  >          buffer += (myName + ":");
127  >
128  >          if (myType & Restraint::rtDisplacement)
129  >            buffer += "\tPosition(angstroms)\tEnergy(kcal/mol)";
130  >
131  >          if (myType & Restraint::rtTwist)
132  >            buffer += "\tTwistAngle(radians)\tEnergy(kcal/mol)";
133  >
134  >          if (myType & Restraint::rtSwingX)
135  >            buffer += "\tSwingXAngle(radians)\tEnergy(kcal/mol)";
136  >
137  >          if (myType & Restraint::rtSwingY)
138  >            buffer += "\tSwingYAngle(radians)\tEnergy(kcal/mol)";
139  >
140  >          buffer += "\n";
141  >        }
142  >      }
143  >
144  >      const int masterNode = 0;
145  >
146  >      if (worldRank == masterNode) {
147  >        (*output_) << "#time\t";
148  >        (*output_) << buffer;
149  >
150  >        int nProc;
151  >        MPI_Comm_size(MPI_COMM_WORLD, &nProc);
152  >        for (int i = 1; i < nProc; ++i) {
153  >
154  >          // receive the length of the string buffer that was
155  >          // prepared by processor i
156  >
157  >          int recvLength;
158  >          MPI_Recv(&recvLength, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &istatus);
159  >          char* recvBuffer = new char[recvLength];
160  >          if (recvBuffer == NULL) {
161  >          } else {
162  >            MPI_Recv(recvBuffer, recvLength, MPI_CHAR, i, 0, MPI_COMM_WORLD,
163  >                     &istatus);
164  >            (*output_) << recvBuffer;
165  >            delete [] recvBuffer;
166  >          }
167  >        }
168  >        (*output_).flush();
169  >      } else {
170  >        int sendBufferLength = buffer.size() + 1;
171  >        MPI_Send(&sendBufferLength, 1, MPI_INT, masterNode, 0, MPI_COMM_WORLD);
172  >        MPI_Send((void *)buffer.c_str(), sendBufferLength, MPI_CHAR, masterNode,
173  >                 0, MPI_COMM_WORLD);
174  >      }
175  >
176  >  #endif // is_mpi
177  >
178  >    }
179
180  <    RestWriter::~RestWriter() {
180  >    void RestWriter::writeRest(std::vector<std::map<int, Restraint::RealPair> > restInfo) {
181  >
182    #ifdef IS_MPI
183  <      if(worldRank == 0 ){
107  <  #endif
108  <        output_.close();
109  <  #ifdef IS_MPI
110  <      }
183  >      MPI_Status istatus;
184    #endif
112  -    }
113  -
114  -    void RestWriter::writeRest(std::vector<std::map<int, Restraint::RealPair> > restInfo){
115  -
185
186  <      output_ << info_->getSnapshotManager()->getCurrentSnapshot()->getTime();
187  <
186  >  #ifndef IS_MPI
187  >      (*output_) << info_->getSnapshotManager()->getCurrentSnapshot()->getTime();
188  >
189        // output some information about the molecules
190        std::vector<std::map<int, Restraint::RealPair> >::const_iterator i;
191        std::map<int, Restraint::RealPair>::const_iterator j;
192  +
193        for( i = restInfo.begin(); i != restInfo.end(); ++i){
194          for(j = (*i).begin(); j != (*i).end(); ++j){
195  <         output_ << "\t" << (j->second).first << "\t" << (j->second).second;
195  >         (*output_) << "\t" << (j->second).first << "\t" << (j->second).second;
196          }
197  <       output_ << std::endl;
197  >       (*output_) << std::endl;
198        }
199  +      (*output_).flush();
200  +  #else
201  +      std::string buffer, first, second;
202  +      std::stringstream ss;
203  +
204  +      std::vector<std::map<int, Restraint::RealPair> >::const_iterator i;
205  +      std::map<int, Restraint::RealPair>::const_iterator j;
206  +
207  +      for( i = restInfo.begin(); i != restInfo.end(); ++i){
208  +        for(j = (*i).begin(); j != (*i).end(); ++j){
209  +          ss.clear();
210  +          ss << (j->second).first;
211  +          ss >> first;
212  +          ss.clear();
213  +          ss << (j->second).second;
214  +          ss >> second;
215  +          buffer += ("\t" + first + "\t" + second);
216  +        }
217  +        buffer += "\n";
218  +      }
219  +
220  +      const int masterNode = 0;
221  +
222  +      if (worldRank == masterNode) {
223  +        (*output_) << info_->getSnapshotManager()->getCurrentSnapshot()->getTime();
224  +        (*output_) << buffer;
225  +
226  +        int nProc;
227  +        MPI_Comm_size(MPI_COMM_WORLD, &nProc);
228  +        for (int i = 1; i < nProc; ++i) {
229  +
230  +          // receive the length of the string buffer that was
231  +          // prepared by processor i
232  +
233  +          int recvLength;
234  +          MPI_Recv(&recvLength, 1, MPI_INT, i, 0, MPI_COMM_WORLD, &istatus);
235  +          char* recvBuffer = new char[recvLength];
236  +          if (recvBuffer == NULL) {
237  +          } else {
238  +            MPI_Recv(recvBuffer, recvLength, MPI_CHAR, i, 0, MPI_COMM_WORLD,
239  +                     &istatus);
240  +            (*output_) << recvBuffer;
241  +
242  +            delete [] recvBuffer;
243  +          }
244  +        }
245  +        (*output_).flush();
246  +      } else {
247  +        int sendBufferLength = buffer.size() + 1;
248  +        MPI_Send(&sendBufferLength, 1, MPI_INT, masterNode, 0, MPI_COMM_WORLD);
249  +        MPI_Send((void *)buffer.c_str(), sendBufferLength, MPI_CHAR, masterNode,
250  +                 0, MPI_COMM_WORLD);
251  +      }
252  +  #endif // is_mpi
253      }
254
130  -  }// end oopse
255
256  +    RestWriter::~RestWriter() {
257  +
258  +  #ifdef IS_MPI
259  +
260  +      if (worldRank == 0) {
261  +  #endif // is_mpi
262  +        if (createRestFile_){
263  +          writeClosing(*output_);
264  +          delete output_;
265  +        }
266  +  #ifdef IS_MPI
267  +      }
268  +  #endif // is_mpi
269  +    }
270  +
271  +    void RestWriter::writeClosing(std::ostream& os) {
272  +      os.flush();
273  +    }
274  +
275  +  }// end namespace OpenMD
276  +