   6     * redistribute this software in source and binary code form, provided
   7     * that the following conditions are met:
   8     *
   9  <  * 1. Acknowledgement of the program authors must be made in any
  10  <  *    publication of scientific results based in part on use of the
  11  <  *    program. An acceptable form of acknowledgement is citation of
  12  <  *    the article in which the program was described (Matthew
  13  <  *    A. Meineke, Charles F. Vardeman II, Teng Lin, Christopher
  14  <  *    J. Fennell and J. Daniel Gezelter, "OOPSE: An Object-Oriented
  15  <  *    Parallel Simulation Engine for Molecular Dynamics,"
  16  <  *    J. Comput. Chem. 26, pp. 252-271 (2005))
  17  <  *
  18  <  * 2. Redistributions of source code must retain the above copyright
   9  >  * 1. Redistributions of source code must retain the above copyright
  10     *    notice, this list of conditions and the following disclaimer.
  11     *
  12  <  * 3. Redistributions in binary form must reproduce the above copyright
  12  >  * 2. Redistributions in binary form must reproduce the above copyright
  13     *    notice, this list of conditions and the following disclaimer in the
  14     *    documentation and/or other materials provided with the
  15     *    distribution.

  28     * arising out of the use of or inability to use software, even if the
  29     * University of Notre Dame has been advised of the possibility of
  30     * such damages.
  31  +  *
  32  +  * SUPPORT OPEN SCIENCE! If you use OpenMD or its source code in your
  33  +  * research, please cite the appropriate papers when you publish your
  34  +  * work. Good starting points are:
  35  +  *
  36  +  * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
  37  +  * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
  38  +  * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
  39  +  * [4] Vardeman & Gezelter, in progress (2009).
  40     */
  41
  42     #include <cmath>

  46     #include "math/Polynomial.hpp"
  47     #include "primitives/Molecule.hpp"
  48     #include "primitives/StuntDouble.hpp"
  49  <  #include "utils/OOPSEConstant.hpp"
  49  >  #include "utils/PhysicalConstants.hpp"
  50     #include "utils/Tuple.hpp"
  51
  52     #ifndef IS_MPI
  53     #include "math/SeqRandNumGen.hpp"
  54     #else
  55  +  #include <mpi.h>
  56     #include "math/ParallelRandNumGen.hpp"
  57     #endif
  58
  59     #define HONKING_LARGE_VALUE 1.0e10
  60
  61  <  namespace oopse {
  61  >  namespace OpenMD {
  62
  63       RNEMD::RNEMD(SimInfo* info) : info_(info), evaluator_(info), seleMan_(info), usePeriodicBoundaryConditions_(info->getSimParams()->getUsePeriodicBoundaryConditions()) {
  64
 155       midBin_ = nBins_ / 2;
 156       if (simParams->haveRNEMD_logWidth()) {
 157         rnemdLogWidth_ = simParams->getRNEMD_logWidth();
 158  <      if (rnemdLogWidth_ != nBins_ || rnemdLogWidth_ != midBin_ + 1) {
 158  >      if (rnemdLogWidth_ != nBins_ && rnemdLogWidth_ != midBin_ + 1) {
 159           std::cerr << "WARNING! RNEMD_logWidth has abnormal value!\n";
 160           std::cerr << "Automatically set back to default.\n";
 161           rnemdLogWidth_ = nBins_;
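
Note: the change at 158 fixes the validation logic, not just style. With `||`, the test `(w != nBins_ || w != midBin_ + 1)` is true for every w whenever the two allowed values differ, so even legal widths were flagged as "abnormal"; `&&` rejects only widths that match neither allowed value. A minimal standalone sketch of the corrected predicate (names mirror the code, function name is illustrative):

    // A log width is legal iff it equals nBins or midBin + 1.
    // (w != a || w != b) is always true when a != b, which is why the
    // old test fired even on valid input; && accepts both legal widths.
    bool abnormalLogWidth(int w, int nBins, int midBin) {
      return (w != nBins) && (w != midBin + 1);
    }
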
 195
 196       RNEMD::~RNEMD() {
 197         delete randNumGen_;
 198  <
 198  <      std::cerr << "total fail trials: " << failTrialCount_ << "\n";
 198  >
 199     #ifdef IS_MPI
 200         if (worldRank == 0) {
 201     #endif
 202  +        std::cerr << "total fail trials: " << failTrialCount_ << "\n";
 203           rnemdLog_.close();
 204           if (rnemdType_ == rnemdKineticScale || rnemdType_ == rnemdPxScale || rnemdType_ == rnemdPyScale)
 205             std::cerr << "total root-checking warnings: " << failRootCount_ << "\n";

 280       }
 281       //make exchangeSum_ comparable between swap & scale
 282       //temporarily without using energyConvert
 283  <    //value = value * 0.5 / OOPSEConstant::energyConvert;
 283  >    //value = value * 0.5 / PhysicalConstants::energyConvert;
 284       value *= 0.5;
 285       break;
 286     case rnemdPx :

 643       a001 = Kcx * px * px;
 644       */
 645
 646  <    //scale all three dimensions, let x = y
 646  >    //scale all three dimensions, let c_x = c_y
 647       a000 = Kcx + Kcy;
 648       a110 = Kcz;
 649       c0 = targetFlux_ - Kcx - Kcy - Kcz;

 679            + Khy * (fastpow(c * py - py - 1.0, 2) - 1.0);
 680       break;
 681     case rnemdPzScale : //we don't really do this, do we?
 682  +    c = 1 - targetFlux_ / Pcz;
 683  +    a000 = Kcx;
 684  +    a110 = Kcy;
 685  +    c0 = Kcz * c * c - Kcx - Kcy - Kcz;
 686  +    a001 = px * px * Khx;
 687  +    a111 = py * py * Khy;
 688  +    b01 = -2.0 * Khx * px * (1.0 + px);
 689  +    b11 = -2.0 * Khy * py * (1.0 + py);
 690  +    c1 = Khx * px * (2.0 + px) + Khy * py * (2.0 + py)
 691  +         + Khz * (fastpow(c * pz - pz - 1.0, 2) - 1.0);
 692  +    break;
 693     default :
 694       break;
 695     }
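
Note: the rnemdPzScale coefficients added at 682-692 can be read back into the two constraints the scaling method solves. Inferring from the code itself (not quoting a published derivation): with cold-slab scale factors (x, y) on v_x and v_y, a fixed z factor c = 1 - targetFlux_/Pcz that carries the momentum flux, ratios p_a = P_ca/P_ha, and hot-slab factors pinned by momentum conservation, the kinetic-energy constraints appear to be

    % cold slab: its total kinetic energy is unchanged after scaling
    K_{cx} x^2 + K_{cy} y^2 + K_{cz} c^2 = K_{cx} + K_{cy} + K_{cz}
    % hot slab: momentum conservation fixes h_a = 1 + p_a (1 - s_a),
    % and the hot slab's kinetic energy is likewise unchanged
    \sum_{\alpha \in \{x,y,z\}} K_{h\alpha} \left( h_\alpha^2 - 1 \right) = 0

Expanding h_x = 1 + p_x(1 - x) reproduces a001 = Khx px^2, b01 = -2 Khx px (1 + px), and the Khx px (2 + px) term in c1, which is what the new case computes.
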
 733       r2 = *ri;
 734       //check if FindRealRoots() gives the right answer
 735       if ( fabs(u0 + r2 * (u1 + r2 * (u2 + r2 * (u3 + r2 * u4)))) > 1e-6 ) {
 736  <      std::cerr << "WARNING! eq solvers might have mistakes!\n";
 736  >      sprintf(painCave.errMsg,
 737  >              "RNEMD Warning: polynomial solve seems to have an error!");
 738  >      painCave.isFatal = 0;
 739  >      simError();
 740         failRootCount_++;
 741       }
 742       //might not be useful w/o rescaling coefficients
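
Note: the guard at 735 evaluates the quartic u0 + u1 r + u2 r^2 + u3 r^3 + u4 r^4 at each candidate root by Horner's scheme and counts a residual above 1e-6 as a solver failure. A self-contained sketch of the same residual test (only the coefficient layout and tolerance are carried over; the function names are illustrative):

    #include <cmath>
    #include <vector>

    // Horner evaluation, identical in form to the guard above.
    double quartic(const double u[5], double r) {
      return u[0] + r * (u[1] + r * (u[2] + r * (u[3] + r * u[4])));
    }

    // Keep only the candidate roots whose residual is within tolerance.
    std::vector<double> verifiedRoots(const double u[5],
                                      const std::vector<double>& roots,
                                      double tol = 1e-6) {
      std::vector<double> good;
      for (double r : roots)
        if (std::fabs(quartic(u, r)) <= tol) good.push_back(r);
      return good;
    }
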
 812         z = bestPair.second;
 813         break;
 814       case rnemdPzScale :
 815  +      x = bestPair.first;
 816  +      y = bestPair.second;
 817  +      z = c;
 818  +      break;
 819       default :
 820         break;
 821       }

 842       exchangeSum_ += targetFlux_;
 843       //we may want to check whether the exchange has been successful
 844     } else {
 845  <    std::cerr << "exchange NOT performed!\n";
 845  >    std::cerr << "exchange NOT performed!\n"; //MPI incompatible
 846       failTrialCount_++;
 847     }
 848
 934           valueCount_[binNo] += 3;
 935         }
 936       }
 937  <    value = value / OOPSEConstant::energyConvert / OOPSEConstant::kb;
 937  >    value = value / PhysicalConstants::energyConvert / PhysicalConstants::kb;
 938
 939       break;
 940     case rnemdPx :
 941     case rnemdPxScale :
 942       value = mass * vel[0];
 943       valueCount_[binNo]++;
 944  <    xVal = mass * vel.x() * vel.x() / OOPSEConstant::energyConvert
 945  <           / OOPSEConstant::kb;
 946  <    yVal = mass * vel.y() * vel.y() / OOPSEConstant::energyConvert
 947  <           / OOPSEConstant::kb;
 948  <    zVal = mass * vel.z() * vel.z() / OOPSEConstant::energyConvert
 949  <           / OOPSEConstant::kb;
 944  >    xVal = mass * vel.x() * vel.x() / PhysicalConstants::energyConvert
 945  >           / PhysicalConstants::kb;
 946  >    yVal = mass * vel.y() * vel.y() / PhysicalConstants::energyConvert
 947  >           / PhysicalConstants::kb;
 948  >    zVal = mass * vel.z() * vel.z() / PhysicalConstants::energyConvert
 949  >           / PhysicalConstants::kb;
 950       xTempHist_[binNo] += xVal;
 951       yTempHist_[binNo] += yVal;
 952       zTempHist_[binNo] += zVal;
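
Note: the per-axis accumulators are instantaneous equipartition temperatures. Each translational degree of freedom carries (1/2) k_B T, so a particle's contribution along axis alpha is m v_alpha^2 / k_B (no factor of 1/2 survives), with energyConvert handling the program's internal unit system:

    \left\langle \tfrac{1}{2} m v_\alpha^2 \right\rangle
      = \tfrac{1}{2} k_B T_\alpha
    \quad\Longrightarrow\quad
    T_\alpha = \frac{m \langle v_\alpha^2 \rangle}{k_B},
    \qquad \alpha \in \{x, y, z\}
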
 984
 985     stat[Stats::RNEMD_EXCHANGE_TOTAL] = exchangeSum_;
 986     //or to be more meaningful, define another item as exchangeSum_ / time
 987  +  int j;
 988
 969  -
 989     #ifdef IS_MPI
 990
 991     // all processors have the same number of bins, and STL vectors pack their

 995                               rnemdLogWidth_, MPI::REALTYPE, MPI::SUM);
 996     MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &valueCount_[0],
 997                               rnemdLogWidth_, MPI::INT, MPI::SUM);
 998  <
 998  >  if (rnemdType_ == rnemdPx || rnemdType_ == rnemdPxScale) {
 999  >    MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &xTempHist_[0],
1000  >                              rnemdLogWidth_, MPI::REALTYPE, MPI::SUM);
1001  >    MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &yTempHist_[0],
1002  >                              rnemdLogWidth_, MPI::REALTYPE, MPI::SUM);
1003  >    MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &zTempHist_[0],
1004  >                              rnemdLogWidth_, MPI::REALTYPE, MPI::SUM);
1005  >  }
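
Note: the reductions added at 998-1005 reuse the in-place pattern of the existing calls above them: each rank passes its local histogram and receives the global sum back in the same buffer, with &v[0] exposing the vector's contiguous storage. A minimal standalone equivalent using the C API (the MPI-2 C++ bindings used in the file were later deprecated; the function name is illustrative):

    #include <mpi.h>
    #include <vector>

    // Sum a per-rank histogram across all ranks, in place: after the
    // call, every rank holds the same global totals in its own buffer.
    void sumHistogram(std::vector<double>& hist) {
      MPI_Allreduce(MPI_IN_PLACE, &hist[0],
                    static_cast<int>(hist.size()),
                    MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    }
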
1006     // If we're the root node, print out the results
1007     int worldRank = MPI::COMM_WORLD.Get_rank();
1008     if (worldRank == 0) {
1009     #endif
 984  -  int j;
1010     rnemdLog_ << time;
1011     for (j = 0; j < rnemdLogWidth_; j++) {
1012       rnemdLog_ << "\t" << valueHist_[j] / (RealType)valueCount_[j];
 988  -    valueHist_[j] = 0.0;
1013     }
1014     rnemdLog_ << "\n";
1015     if (rnemdType_ == rnemdPx || rnemdType_ == rnemdPxScale) {
1016       xTempLog_ << time;
1017       for (j = 0; j < rnemdLogWidth_; j++) {
1018         xTempLog_ << "\t" << xTempHist_[j] / (RealType)valueCount_[j];
 995  -      xTempHist_[j] = 0.0;
1019       }
1020       xTempLog_ << "\n";
1021       yTempLog_ << time;
1022       for (j = 0; j < rnemdLogWidth_; j++) {
1023         yTempLog_ << "\t" << yTempHist_[j] / (RealType)valueCount_[j];
1001  -      yTempHist_[j] = 0.0;
1024       }
1025       yTempLog_ << "\n";
1026       zTempLog_ << time;
1027       for (j = 0; j < rnemdLogWidth_; j++) {
1028         zTempLog_ << "\t" << zTempHist_[j] / (RealType)valueCount_[j];
1007  -      zTempHist_[j] = 0.0;
1029       }
1030       zTempLog_ << "\n";
1031     }
1011  -  for (j = 0; j < rnemdLogWidth_; j++) valueCount_[j] = 0;
1032     #ifdef IS_MPI
1033     }
1034     #endif
1035  +  for (j = 0; j < rnemdLogWidth_; j++) {
1036  +    valueCount_[j] = 0;
1037  +    valueHist_[j] = 0.0;
1038  +  }
1039  +  if (rnemdType_ == rnemdPx || rnemdType_ == rnemdPxScale)
1040  +    for (j = 0; j < rnemdLogWidth_; j++) {
1041  +      xTempHist_[j] = 0.0;
1042  +      yTempHist_[j] = 0.0;
1043  +      zTempHist_[j] = 0.0;
1044  +    }
1045     }
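
Note: relocating the zeroing loops (removed at old 988/995/1001/1007/1011, re-added at 1035-1044) appears to be the substantive part of this hunk. Because the Allreduce is in place, every rank's histogram holds the global sums after the reduction; resetting only inside the worldRank == 0 block, as before, would leave non-root ranks carrying stale totals into the next accumulation window. A compressed sketch of the corrected reduce/write/reset order (writeLog is a hypothetical stand-in for the rnemdLog_ output above; C API for brevity):

    #include <mpi.h>
    #include <algorithm>
    #include <vector>

    // Reduce, let rank 0 write, then clear on EVERY rank so the next
    // accumulation window starts from zero everywhere.
    void flushHistogram(std::vector<double>& hist,
                        std::vector<int>& count,
                        void (*writeLog)(const std::vector<double>&,
                                         const std::vector<int>&)) {
      int n = static_cast<int>(hist.size());
      MPI_Allreduce(MPI_IN_PLACE, &hist[0], n, MPI_DOUBLE, MPI_SUM,
                    MPI_COMM_WORLD);
      MPI_Allreduce(MPI_IN_PLACE, &count[0], n, MPI_INT, MPI_SUM,
                    MPI_COMM_WORLD);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      if (rank == 0) writeLog(hist, count);         // writer only
      std::fill(hist.begin(), hist.end(), 0.0);     // all ranks
      std::fill(count.begin(), count.end(), 0);
    }
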
1016  -
1046     }