  6   | * redistribute this software in source and binary code form, provided
  7   | * that the following conditions are met:
  8   | *
  9 < | * 1. Acknowledgement of the program authors must be made in any
 10 < | * publication of scientific results based in part on use of the
 11 < | * program. An acceptable form of acknowledgement is citation of
 12 < | * the article in which the program was described (Matthew
 13 < | * A. Meineke, Charles F. Vardeman II, Teng Lin, Christopher
 14 < | * J. Fennell and J. Daniel Gezelter, "OOPSE: An Object-Oriented
 15 < | * Parallel Simulation Engine for Molecular Dynamics,"
 16 < | * J. Comput. Chem. 26, pp. 252-271 (2005))
 17 < | *
 18 < | * 2. Redistributions of source code must retain the above copyright
  9 > | * 1. Redistributions of source code must retain the above copyright
 10   | * notice, this list of conditions and the following disclaimer.
 11   | *
 12 < | * 3. Redistributions in binary form must reproduce the above copyright
 12 > | * 2. Redistributions in binary form must reproduce the above copyright
 13   | * notice, this list of conditions and the following disclaimer in the
 14   | * documentation and/or other materials provided with the
 15   | * distribution.
 28   | * arising out of the use of or inability to use software, even if the
 29   | * University of Notre Dame has been advised of the possibility of
 30   | * such damages.
 31 + | *
 32 + | * SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
 33 + | * research, please cite the appropriate papers when you publish your
 34 + | * work. Good starting points are:
 35 + | *
 36 + | * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
 37 + | * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
 38 + | * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008).
 39 + | * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010).
 40 + | * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
 41   | */
 42   | #include <algorithm>
 43   | #include "brains/BlockSnapshotManager.hpp"
 44 < | #include "utils/residentMem.h"
 45 < | #include "utils/physmem.h"
 44 > | //#include "utils/residentMem.h"
 45 > | //#include "utils/physmem.h"
 46   | #include "utils/Algorithm.hpp"
 47   | #include "brains/SimInfo.hpp"
 48   | #include "io/DumpReader.hpp"
 49   |
 50 < | namespace oopse {
 51 < | BlockSnapshotManager::BlockSnapshotManager(SimInfo* info, const std::string& filename,
 52 < | int storageLayout, int blockCapacity)
 53 < | : SnapshotManager(storageLayout), info_(info), blockCapacity_(blockCapacity),
 54 < | activeBlocks_(blockCapacity_, -1), activeRefCount_(blockCapacity_, 0) {
 50 > | namespace OpenMD {
 51 > | BlockSnapshotManager::BlockSnapshotManager(SimInfo* info,
 52 > | const std::string& filename,
 53 > | int storageLayout,
 54 > | long long int memSize,
 55 > | int blockCapacity)
 56 > | : SnapshotManager(storageLayout), info_(info), memSize_(memSize),
 57 > | blockCapacity_(blockCapacity), activeBlocks_(blockCapacity_, -1),
 58 > | activeRefCount_(blockCapacity_, 0) {
 59   |
 60 < | nAtoms_ = info->getNGlobalAtoms();
 61 < | nRigidBodies_ = info->getNGlobalRigidBodies();
 60 > | nAtoms_ = info->getNGlobalAtoms();
 61 > | nRigidBodies_ = info->getNGlobalRigidBodies();
 62 > | nCutoffGroups_ = info->getNCutoffGroups();
 63   |
 64 < | double physMem = physmem_total();
 65 < | double rssMem = residentMem();
 66 < | double avaliablePhysMem = physMem - rssMem;
 64 > | // eliminate suspect calls to figure out free memory:
 65 > | // RealType physMem = physmem_total();
 66 > | // RealType rssMem = residentMem();
 67 > | // RealType avaliablePhysMem = physMem - rssMem;
 68   |
 69 < | std::cout << "physmem = " << int(physMem) << "\trssMem = "<< rssMem << "\t availablePhysMem = " << avaliablePhysMem <<std::endl;
 70 < | int bytesPerStuntDouble = DataStorage::getBytesPerStuntDouble(storageLayout);
 69 > | int bytesPerStuntDouble = DataStorage::getBytesPerStuntDouble(storageLayout);
 70 > | int bytesPerCutoffGroup = DataStorage::getBytesPerStuntDouble(DataStorage::dslPosition);
 71   |
 72 < | int bytesPerFrame = (nRigidBodies_ + nAtoms_) * bytesPerStuntDouble;
 72 > | int bytesPerFrameData = Snapshot::getFrameDataSize();
 73 > | int bytesPerFrame = (nRigidBodies_ + nAtoms_) * bytesPerStuntDouble
 74 > | + nCutoffGroups_ * bytesPerCutoffGroup
 75 > | + bytesPerFrameData;
 76   |
 77 < | int frameCapacity = int (avaliablePhysMem / bytesPerFrame);
 78 < |
 79 < | nSnapshotPerBlock_ = frameCapacity /blockCapacity_ ;
 77 > | // total number of frames that can fit in memory
 78 > | //RealType frameCapacity = avaliablePhysMem / bytesPerFrame;
 79 > | RealType frameCapacity = (RealType) memSize_ / (RealType) bytesPerFrame;
 80   |
 81 < | reader_ = new DumpReader(info, filename);
 82 < | nframes_ = reader_->getNFrames();
 83 < |
 84 < | int nblocks = nframes_ / nSnapshotPerBlock_;
 85 < | if (nframes_ % nSnapshotPerBlock_ != 0) {
 81 > | // number of frames in each block given the need to hold multiple blocks
 82 > | // in memory at the same time:
 83 > | nSnapshotPerBlock_ = int(frameCapacity) / blockCapacity_;
 84 > | if (nSnapshotPerBlock_ <= 0) {
 85 > | std::cerr << "not enough memory to hold two configs!" << std::endl;
 86 > | }
 87 > | reader_ = new DumpReader(info, filename);
 88 > | nframes_ = reader_->getNFrames();
 89 > | int nblocks = nframes_ / nSnapshotPerBlock_;
 90 > | if (nframes_ % int(nSnapshotPerBlock_) != 0) {
 91   | ++nblocks;
 92 < | }
 92 > | }
 93   |
 94 < | for (int i = 0; i < nblocks; ++i) {
 94 > | for (int i = 0; i < nblocks; ++i) {
 95   | blocks_.push_back(SnapshotBlock(i*nSnapshotPerBlock_, (i+1)*nSnapshotPerBlock_));
 96 < | }
 97 < | //the last block may not have nSnapshotPerBlock frames, we need to consider this special situation
 98 < | blocks_.back().second = nframes_;
 96 > | }
 97 > | //the last block may not have nSnapshotPerBlock frames, we need to consider this special situation
 98 > | blocks_.back().second = nframes_;
 99   |
100 < | snapshots_.insert(snapshots_.begin(), nframes_, static_cast<Snapshot*>(NULL));
100 > | snapshots_.insert(snapshots_.begin(), nframes_, static_cast<Snapshot*>(NULL));
101 > |
102 > | std::cout << "-----------------------------------------------------"
103 > | << std::endl;
104 > | std::cout << "BlockSnapshotManager memory report:" << std::endl;
105 > | std::cout << "\n";
106 > | // std::cout << " Physical Memory available:\t" << (unsigned long)physMem << " bytes" <<std::endl;
107 > | //std::cout << " Resident Memory in use:\t" << (unsigned long)rssMem << " bytes" <<std::endl;
108 > | //std::cout << "Memory available for OpenMD:\t" << (unsigned long)avaliablePhysMem << " bytes" <<std::endl;
109 > | std::cout << "Memory requested for OpenMD:\t"
110 > | << (unsigned long)memSize_ << " bytes" << std::endl;
111 > | std::cout << " Bytes per FrameData:\t"
112 > | << (unsigned long)bytesPerFrameData << std::endl;
113 > | std::cout << " Bytes per StuntDouble:\t"
114 > | << (unsigned long)bytesPerStuntDouble << std::endl;
115 > | std::cout << " Bytes per Cutoff Group:\t"
116 > | << (unsigned long)bytesPerCutoffGroup << std::endl;
117 > | std::cout << " Bytes per Frame:\t"
118 > | << (unsigned long)bytesPerFrame << std::endl;
119 > | std::cout << " Frame Capacity:\t"
120 > | << (unsigned long)frameCapacity << std::endl;
121 > | std::cout << " Frames in trajectory:\t"
122 > | << (unsigned long)nframes_ << std::endl;
123 > | std::cout << " Snapshots per Block:\t"
124 > | << (unsigned long)nSnapshotPerBlock_ << std::endl;
125 > | std::cout << " Total number of Blocks:\t"
126 > | << (unsigned long)nblocks << std::endl;
127 > | std::cout << "-----------------------------------------------------"
128 > | << std::endl;
129   |
130 < | }
130 > | }
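The new constructor replaces the physical-memory probes with an explicit memSize argument and sizes each block from the per-frame byte count. Below is a minimal stand-alone sketch of that same arithmetic; every number in it is an illustrative assumption, not an OpenMD default, and the real values come from SimInfo, DataStorage, and Snapshot::getFrameDataSize().

#include <iostream>

int main() {
  // Illustrative inputs only.
  long long memSize             = 2LL * 1024 * 1024 * 1024; // memory requested (2 GB)
  long long nAtoms              = 100000;
  long long nRigidBodies        = 0;
  long long nCutoffGroups       = 100000;
  long long bytesPerStuntDouble = 200;   // depends on storageLayout
  long long bytesPerCutoffGroup = 24;    // position-only storage
  long long bytesPerFrameData   = 1024;  // per-frame metadata
  int blockCapacity             = 2;     // blocks held in memory at once

  long long bytesPerFrame = (nRigidBodies + nAtoms) * bytesPerStuntDouble
                            + nCutoffGroups * bytesPerCutoffGroup
                            + bytesPerFrameData;
  double frameCapacity    = static_cast<double>(memSize) / bytesPerFrame;
  int nSnapshotPerBlock   = static_cast<int>(frameCapacity) / blockCapacity;

  std::cout << "bytes per frame:     " << bytesPerFrame << "\n"
            << "frame capacity:      " << static_cast<long long>(frameCapacity) << "\n"
            << "snapshots per block: " << nSnapshotPerBlock << "\n";
  return 0;
}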
131   |
132   |
133 < | BlockSnapshotManager::~BlockSnapshotManager() {
133 > | BlockSnapshotManager::~BlockSnapshotManager() {
134   | currentSnapshot_ = NULL;
135   | previousSnapshot_ = NULL;
136   |
138   |
139   | std::vector<int>::iterator i;
140   | for (i = activeBlocks_.begin(); i != activeBlocks_.end(); ++i) {
141 < | if (*i != -1) {
142 < | unloadBlock(*i);
143 < | }
141 > | if (*i != -1) {
142 > | unloadBlock(*i);
143 > | }
144   | }
145 < | }
145 > | }
146   |
147 < | int BlockSnapshotManager::getNActiveBlocks() {
147 > | Snapshot* BlockSnapshotManager::getSnapshot(int id) {
148 > | currentSnapshot_ = snapshots_[id];
149 > | return snapshots_[id];
150 > | }
151 > |
152 > | int BlockSnapshotManager::getNActiveBlocks() {
153   | #ifdef __RWSTD
154   | int count = 0;
155   | std::count_if(activeBlocks_.begin(), activeBlocks_.end(), std::bind2nd(std::not_equal_to<int>(), -1), count);
157   | #else
158   | return std::count_if(activeBlocks_.begin(), activeBlocks_.end(), std::bind2nd(std::not_equal_to<int>(), -1));
159   | #endif
160 < | }
160 > | }
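std::bind2nd, used in getNActiveBlocks() above, was deprecated in C++11 and removed in C++17. A small stand-alone sketch of the same count written with a lambda; the helper name countActiveBlocks is an assumption for illustration, not an OpenMD function.

#include <algorithm>
#include <vector>

// Count the entries of activeBlocks_ that are not the -1 sentinel,
// i.e. the number of blocks currently resident in memory.
static int countActiveBlocks(const std::vector<int>& activeBlocks) {
  return static_cast<int>(std::count_if(activeBlocks.begin(), activeBlocks.end(),
                                        [](int b) { return b != -1; }));
}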
161   |
162   |
163   |
164 < | bool BlockSnapshotManager::loadBlock(int block) {
164 > | bool BlockSnapshotManager::loadBlock(int block) {
165   | std::vector<int>::iterator i = findActiveBlock(block);
166   | bool loadSuccess;
167   | if (i != activeBlocks_.end()) {
168 < | //if block is already in memory, just increast the reference count
169 < | ++activeRefCount_[i - activeBlocks_.begin()];
170 < | loadSuccess = true;
168 > | //if block is already in memory, just increast the reference count
169 > | ++activeRefCount_[i - activeBlocks_.begin()];
170 > | loadSuccess = true;
171   | } else if (getNActiveBlocks() < blockCapacity_){
172 < | //if number of active blocks is less than the block capacity, just load it
173 < | internalLoad(block);
174 < | loadSuccess = true;
175 < | } else if (hasZeroRefBlock() > 0) {
176 < | //if already reach the block capacity, need to unload a block with 0 reference
177 < | int zeroRefBlock = getFirstZeroRefBlock();
178 < | assert(zeroRefBlock != -1);
179 < | internalUnload(zeroRefBlock);
180 < | internalLoad(block);
172 > | //if number of active blocks is less than the block capacity, just load it
173 > | internalLoad(block);
174 > | loadSuccess = true;
175 > | } else if ( hasZeroRefBlock() ) {
176 > | //if already reach the block capacity, need to unload a block with 0 reference
177 > | int zeroRefBlock = getFirstZeroRefBlock();
178 > | assert(zeroRefBlock != -1);
179 > | internalUnload(zeroRefBlock);
180 > | internalLoad(block);
181   | } else {
182 < | //reach the capacity and all blocks in memory are not zero reference
183 < | loadSuccess = false;
182 > | //reach the capacity and all blocks in memory are not zero reference
183 > | loadSuccess = false;
184   | }
185   |
186   | return loadSuccess;
187 < | }
187 > | }
188   |
189 < | bool BlockSnapshotManager::unloadBlock(int block) {
189 > | bool BlockSnapshotManager::unloadBlock(int block) {
190   | bool unloadSuccess;
191   | std::vector<int>::iterator i = findActiveBlock(block);
192   |
193   | if (i != activeBlocks_.end()){
194 < | --activeRefCount_[i - activeBlocks_.begin()];
195 < | if (activeRefCount_[i - activeBlocks_.begin()] < 0) {
196 < | //in case, unloadBlock called multiple times
197 < | activeRefCount_[i - activeBlocks_.begin()] = 0;
198 < | }
194 > | --activeRefCount_[i - activeBlocks_.begin()];
195 > | if (activeRefCount_[i - activeBlocks_.begin()] < 0) {
196 > | //in case, unloadBlock called multiple times
197 > | activeRefCount_[i - activeBlocks_.begin()] = 0;
198 > | }
199 > |
200 > | if (activeRefCount_[i-activeBlocks_.begin()] == 0) {
201 > | internalUnload(block);
202 > | }
203   |
204 < | unloadSuccess = true;
204 > | unloadSuccess = true;
205   | } else {
206 < | unloadSuccess = false;
206 > | unloadSuccess = false;
207   | }
208   |
209   | return unloadSuccess;
210 < | }
210 > | }
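loadBlock() and unloadBlock() implement simple reference counting over at most blockCapacity_ resident blocks: a load either bumps the count, fills a free slot, or evicts a zero-reference block, and an unload that drops the count to zero frees that block's snapshots. A hedged usage sketch follows; the helper analyzeBlock and its frame-range parameters are assumptions for illustration, not OpenMD code, and only methods shown in this diff are called.

#include "brains/BlockSnapshotManager.hpp"

// Hypothetical caller: visit every frame of one block, pairing the
// loadBlock() with an unloadBlock() so the block's reference count
// returns to zero and it can later be evicted to make room.
void analyzeBlock(OpenMD::BlockSnapshotManager* bsm, int block,
                  int firstFrame, int lastFrame) {
  if (!bsm->loadBlock(block)) return;          // false: no block could be evicted
  for (int f = firstFrame; f < lastFrame; ++f) {
    OpenMD::Snapshot* s = bsm->getSnapshot(f); // frame is already in memory
    // ... compute properties from *s here ...
    (void) s;
  }
  bsm->unloadBlock(block);                     // reference count back to zero
}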
211   |
212 < | void BlockSnapshotManager::internalLoad(int block) {
212 > | void BlockSnapshotManager::internalLoad(int block) {
213   |
214   | for (int i = blocks_[block].first; i < blocks_[block].second; ++i) {
215 < | snapshots_[i] = loadFrame(i);
215 > | snapshots_[i] = loadFrame(i);
216   | }
217   |
218   | std::vector<int>::iterator j;
220   | assert(j != activeBlocks_.end());
221   | *j = block;
222   | ++activeRefCount_[j - activeBlocks_.begin()];
223 < | }
223 > | }
224   |
225 < | void BlockSnapshotManager::internalUnload(int block) {
225 > | void BlockSnapshotManager::internalUnload(int block) {
226 > | std::cerr << "called internal unload for block "<< block << "\n";
227   | for (int i = blocks_[block].first; i < blocks_[block].second; ++i) {
228 < | delete snapshots_[i];
229 < | snapshots_[i] = NULL;
228 > | delete snapshots_[i];
229 > | snapshots_[i] = NULL;
230   | }
231   | std::vector<int>::iterator j;
232   | j = std::find(activeBlocks_.begin(), activeBlocks_.end(), block);
233   | assert(j != activeBlocks_.end());
234   | *j = -1;
235 < | }
235 > | }
236   |
237 < | bool BlockSnapshotManager::hasZeroRefBlock(){
237 > | bool BlockSnapshotManager::hasZeroRefBlock(){
238   | return std::find(activeRefCount_.begin(), activeRefCount_.end(), 0) != activeRefCount_.end() ? true : false;
239 < | }
239 > | }
240   |
241 < | int BlockSnapshotManager::getFirstZeroRefBlock(){
242 < | std::vector<int>::iterator i = std::find(activeRefCount_.begin(), activeRefCount_.end(), 0);
243 < | return i != activeRefCount_.end() ? activeBlocks_[i - activeRefCount_.begin()] : -1;
244 < | }
241 > | int BlockSnapshotManager::getFirstZeroRefBlock(){
242 > | std::vector<int>::iterator i = std::find(activeRefCount_.begin(), activeRefCount_.end(), 0);
243 > | return i != activeRefCount_.end() ? activeBlocks_[i - activeRefCount_.begin()] : -1;
244 > | }
245   |
246 < | std::vector<int> BlockSnapshotManager::getActiveBlocks() {
246 > | std::vector<int> BlockSnapshotManager::getActiveBlocks() {
247   | std::vector<int> result;
248 < | oopse::copy_if(activeBlocks_.begin(), activeBlocks_.end(), std::back_inserter(result),
249 < | std::bind2nd(std::not_equal_to<int>(), -1));
248 > | OpenMD::copy_if(activeBlocks_.begin(), activeBlocks_.end(), std::back_inserter(result),
249 > | std::bind2nd(std::not_equal_to<int>(), -1));
250   | return result;
251 < | }
251 > | }
252   |
253 < | Snapshot* BlockSnapshotManager::loadFrame(int frame){
254 < | Snapshot* snapshot = new Snapshot(nAtoms_, nRigidBodies_, getStorageLayout());
253 > | Snapshot* BlockSnapshotManager::loadFrame(int frame){
254 > | Snapshot* snapshot = new Snapshot(nAtoms_, nRigidBodies_, nCutoffGroups_,
255 > | getStorageLayout());
256   | snapshot->setID(frame);
257 + | snapshot->clearDerivedProperties();
258   |
204 - | /** @todo fixed me */
205 - | Snapshot* oldSnapshot = currentSnapshot_;
259   | currentSnapshot_ = snapshot;
260   | reader_->readFrame(frame);
261 < | currentSnapshot_ = oldSnapshot;
261 > |
262   | return snapshot;
263 < | }
263 > | }
264   |
265 < | int BlockSnapshotManager::getNFrames() {
265 > | int BlockSnapshotManager::getNFrames() {
266   | return reader_->getNFrames();
267 < | }
267 > | }
268   |
269 + | void BlockSnapshotManager::needCOMprops(bool ncp) {
270 + | reader_->setNeedCOMprops(ncp);
271 + | }
272 + |
273   | }