   6    * redistribute this software in source and binary code form, provided
   7    * that the following conditions are met:
   8    *
   9 <  * 1. Acknowledgement of the program authors must be made in any
  10 <  * publication of scientific results based in part on use of the
  11 <  * program. An acceptable form of acknowledgement is citation of
  12 <  * the article in which the program was described (Matthew
  13 <  * A. Meineke, Charles F. Vardeman II, Teng Lin, Christopher
  14 <  * J. Fennell and J. Daniel Gezelter, "OOPSE: An Object-Oriented
  15 <  * Parallel Simulation Engine for Molecular Dynamics,"
  16 <  * J. Comput. Chem. 26, pp. 252-271 (2005))
  17 <  *
  18 <  * 2. Redistributions of source code must retain the above copyright
   9 >  * 1. Redistributions of source code must retain the above copyright
  10    * notice, this list of conditions and the following disclaimer.
  11    *
  12 <  * 3. Redistributions in binary form must reproduce the above copyright
  12 >  * 2. Redistributions in binary form must reproduce the above copyright
  13    * notice, this list of conditions and the following disclaimer in the
  14    * documentation and/or other materials provided with the
  15    * distribution.
  28    * arising out of the use of or inability to use software, even if the
  29    * University of Notre Dame has been advised of the possibility of
  30    * such damages.
  31 +  *
  32 +  * SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
  33 +  * research, please cite the appropriate papers when you publish your
  34 +  * work. Good starting points are:
  35 +  *
  36 +  * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
  37 +  * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
  38 +  * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
  39 +  * [4] Vardeman & Gezelter, in progress (2009).
  40    */
  41
  42    #include <cmath>
  46    #include "math/Polynomial.hpp"
  47    #include "primitives/Molecule.hpp"
  48    #include "primitives/StuntDouble.hpp"
  49 <  #include "utils/OOPSEConstant.hpp"
  49 >  #include "utils/PhysicalConstants.hpp"
  50    #include "utils/Tuple.hpp"
  51
  52    #ifndef IS_MPI
  57
  58    #define HONKING_LARGE_VALUE 1.0e10
  59
  60 <  namespace oopse {
  60 >  namespace OpenMD {
  61
  62    RNEMD::RNEMD(SimInfo* info) : info_(info), evaluator_(info), seleMan_(info), usePeriodicBoundaryConditions_(info->getSimParams()->getUsePeriodicBoundaryConditions()) {
  63
 154    midBin_ = nBins_ / 2;
 155    if (simParams->haveRNEMD_logWidth()) {
 156    rnemdLogWidth_ = simParams->getRNEMD_logWidth();
 157 <  if (rnemdLogWidth_ != nBins_ || rnemdLogWidth_ != midBin_ + 1) {
 157 >  if (rnemdLogWidth_ != nBins_ && rnemdLogWidth_ != midBin_ + 1) {
 158    std::cerr << "WARNING! RNEMD_logWidth has abnormal value!\n";
 159    std::cerr << "Automatically set back to default.\n";
 160    rnemdLogWidth_ = nBins_;
 194
 195    RNEMD::~RNEMD() {
 196    delete randNumGen_;
 197 <
 198 <  std::cerr << "total fail trials: " << failTrialCount_ << "\n";
 197 >
 198    #ifdef IS_MPI
 199    if (worldRank == 0) {
 200    #endif
 201 +  std::cerr << "total fail trials: " << failTrialCount_ << "\n";
 202    rnemdLog_.close();
 203    if (rnemdType_ == rnemdKineticScale || rnemdType_ == rnemdPxScale || rnemdType_ == rnemdPyScale)
 204    std::cerr << "total root-checking warnings: " << failRootCount_ << "\n";
280 |
|
//make exchangeSum_ comparable between swap & scale |
281 |
|
//temporarily without using energyConvert |
282 |
< |
//value = value * 0.5 / OOPSEConstant::energyConvert; |
282 |
> |
//value = value * 0.5 / PhysicalConstants::energyConvert; |
283 |
|
value *= 0.5; |
284 |
|
break; |
285 |
|
case rnemdPx : |
 642    a001 = Kcx * px * px;
 643    */
 644
 645 <  //scale all three dimensions, let x = y
 645 >  //scale all three dimensions, let c_x = c_y
 646    a000 = Kcx + Kcy;
 647    a110 = Kcz;
 648    c0 = targetFlux_ - Kcx - Kcy - Kcz;
 678    + Khy * (fastpow(c * py - py - 1.0, 2) - 1.0);
 679    break;
 680    case rnemdPzScale : //we don't really do this, do we?
 681 +  c = 1 - targetFlux_ / Pcz;
 682 +  a000 = Kcx;
 683 +  a110 = Kcy;
 684 +  c0 = Kcz * c * c - Kcx - Kcy - Kcz;
 685 +  a001 = px * px * Khx;
 686 +  a111 = py * py * Khy;
 687 +  b01 = -2.0 * Khx * px * (1.0 + px);
 688 +  b11 = -2.0 * Khy * py * (1.0 + py);
 689 +  c1 = Khx * px * (2.0 + px) + Khy * py * (2.0 + py)
 690 +  + Khz * (fastpow(c * pz - pz - 1.0, 2) - 1.0);
 691 +  break;
 692    default :
 693    break;
 694    }
 732    r2 = *ri;
 733    //check if FindRealRoots() gives the right answer
 734    if ( fabs(u0 + r2 * (u1 + r2 * (u2 + r2 * (u3 + r2 * u4)))) > 1e-6 ) {
 735 <  std::cerr << "WARNING! eq solvers might have mistakes!\n";
 735 >  sprintf(painCave.errMsg,
 736 >  "RNEMD Warning: polynomial solve seems to have an error!");
 737 >  painCave.isFatal = 0;
 738 >  simError();
 739    failRootCount_++;
 740    }
 741    //might not be useful w/o rescaling coefficients
 811    z = bestPair.second;
 812    break;
 813    case rnemdPzScale :
 814 +  x = bestPair.first;
 815 +  y = bestPair.second;
 816 +  z = c;
 817 +  break;
 818    default :
 819    break;
 820    }
 841    exchangeSum_ += targetFlux_;
 842    //we may want to check whether the exchange has been successful
 843    } else {
 844 <  std::cerr << "exchange NOT performed!\n";
 844 >  std::cerr << "exchange NOT performed!\n"; //MPI incompatible
 845    failTrialCount_++;
 846    }
 847
 933    valueCount_[binNo] += 3;
 934    }
 935    }
 936 <  value = value / OOPSEConstant::energyConvert / OOPSEConstant::kb;
 936 >  value = value / PhysicalConstants::energyConvert / PhysicalConstants::kb;
 937
 938    break;
 939    case rnemdPx :
 940    case rnemdPxScale :
 941    value = mass * vel[0];
 942    valueCount_[binNo]++;
 943 <  xVal = mass * vel.x() * vel.x() / OOPSEConstant::energyConvert
 944 <  / OOPSEConstant::kb;
 945 <  yVal = mass * vel.y() * vel.y() / OOPSEConstant::energyConvert
 946 <  / OOPSEConstant::kb;
 947 <  zVal = mass * vel.z() * vel.z() / OOPSEConstant::energyConvert
 948 <  / OOPSEConstant::kb;
 943 >  xVal = mass * vel.x() * vel.x() / PhysicalConstants::energyConvert
 944 >  / PhysicalConstants::kb;
 945 >  yVal = mass * vel.y() * vel.y() / PhysicalConstants::energyConvert
 946 >  / PhysicalConstants::kb;
 947 >  zVal = mass * vel.z() * vel.z() / PhysicalConstants::energyConvert
 948 >  / PhysicalConstants::kb;
 949    xTempHist_[binNo] += xVal;
 950    yTempHist_[binNo] += yVal;
 951    zTempHist_[binNo] += zVal;
 983
 984    stat[Stats::RNEMD_EXCHANGE_TOTAL] = exchangeSum_;
 985    //or to be more meaningful, define another item as exchangeSum_ / time
 986 +  int j;
 987
 969 -
 988    #ifdef IS_MPI
 989
 990    // all processors have the same number of bins, and STL vectors pack their
 994    rnemdLogWidth_, MPI::REALTYPE, MPI::SUM);
 995    MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &valueCount_[0],
 996    rnemdLogWidth_, MPI::INT, MPI::SUM);
 997 <
 997 >  if (rnemdType_ == rnemdPx || rnemdType_ == rnemdPxScale) {
 998 >  MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &xTempHist_[0],
 999 >  rnemdLogWidth_, MPI::REALTYPE, MPI::SUM);
1000 >  MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &yTempHist_[0],
1001 >  rnemdLogWidth_, MPI::REALTYPE, MPI::SUM);
1002 >  MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &zTempHist_[0],
1003 >  rnemdLogWidth_, MPI::REALTYPE, MPI::SUM);
1004 >  }
1005    // If we're the root node, should we print out the results
1006    int worldRank = MPI::COMM_WORLD.Get_rank();
1007    if (worldRank == 0) {
1008    #endif
 984 -  int j;
1009    rnemdLog_ << time;
1010    for (j = 0; j < rnemdLogWidth_; j++) {
1011    rnemdLog_ << "\t" << valueHist_[j] / (RealType)valueCount_[j];
 988 -  valueHist_[j] = 0.0;
1012    }
1013    rnemdLog_ << "\n";
1014    if (rnemdType_ == rnemdPx || rnemdType_ == rnemdPxScale) {
1015    xTempLog_ << time;
1016    for (j = 0; j < rnemdLogWidth_; j++) {
1017    xTempLog_ << "\t" << xTempHist_[j] / (RealType)valueCount_[j];
 995 -  xTempHist_[j] = 0.0;
1018    }
1019    xTempLog_ << "\n";
1020    yTempLog_ << time;
1021    for (j = 0; j < rnemdLogWidth_; j++) {
1022    yTempLog_ << "\t" << yTempHist_[j] / (RealType)valueCount_[j];
1001 -  yTempHist_[j] = 0.0;
1023    }
1024    yTempLog_ << "\n";
1025    zTempLog_ << time;
1026    for (j = 0; j < rnemdLogWidth_; j++) {
1027    zTempLog_ << "\t" << zTempHist_[j] / (RealType)valueCount_[j];
1007 -  zTempHist_[j] = 0.0;
1028    }
1029    zTempLog_ << "\n";
1030    }
1011 -  for (j = 0; j < rnemdLogWidth_; j++) valueCount_[j] = 0;
1031    #ifdef IS_MPI
1032    }
1033    #endif
1034 +  for (j = 0; j < rnemdLogWidth_; j++) {
1035 +  valueCount_[j] = 0;
1036 +  valueHist_[j] = 0.0;
1037 +  }
1038 +  if (rnemdType_ == rnemdPx || rnemdType_ == rnemdPxScale)
1039 +  for (j = 0; j < rnemdLogWidth_; j++) {
1040 +  xTempHist_[j] = 0.0;
1041 +  yTempHist_[j] = 0.0;
1042 +  zTempHist_[j] = 0.0;
1043 +  }
1044    }
1016 -
1045    }
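
A note on the MPI pattern used in the histogram output above: every rank keeps identical per-bin accumulators, an in-place Allreduce sums them across ranks, only the root rank writes the averages to the log files, and every rank then clears its bins for the next sampling interval. The following is a minimal standalone sketch of that pattern using the MPI C API rather than the C++ bindings shown in the diff; the buffer names and bin count are illustrative only and are not OpenMD identifiers.

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  const int nBins = 10;
  std::vector<double> hist(nBins, 1.0);  // per-rank partial sums
  std::vector<int> count(nBins, 1);      // per-rank sample counts

  // Sum contributions from all ranks in place; STL vectors store their
  // elements contiguously, so &hist[0] is a valid MPI buffer.
  MPI_Allreduce(MPI_IN_PLACE, &hist[0], nBins, MPI_DOUBLE, MPI_SUM,
                MPI_COMM_WORLD);
  MPI_Allreduce(MPI_IN_PLACE, &count[0], nBins, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  // Only the root rank reports the per-bin averages.
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (rank == 0) {
    for (int j = 0; j < nBins; ++j)
      std::printf("%d\t%g\n", j, hist[j] / count[j]);
  }

  // Reset on every rank so the next sampling interval starts from zero.
  for (int j = 0; j < nBins; ++j) {
    hist[j] = 0.0;
    count[j] = 0;
  }

  MPI_Finalize();
  return 0;
}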