#include <cmath>
#include <cstdlib>
#include <iostream>
#include <vector>

#include "ConjugateMinimizer.hpp"
#include "Utility.hpp"

using namespace std;

ConjugateMinimizerBase::ConjugateMinimizerBase(NLModel1* nlmodel, MinimizerParameterSet* param)
                       : MinimizerUsingLineSearch(param){
  int dim;

  model = nlmodel;

  //set the dimension (the serial and MPI builds currently use the same call)
#ifndef IS_MPI
  dim = model->getDim();
#else
  dim = model->getDim();
#endif

  prevGrad.resize(dim);
  gradient.resize(dim);
  prevDirection.resize(dim);
  direction.resize(dim);
}
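
// A minimal usage sketch, assuming a user-supplied NLModel1 subclass
// (MyModel below is hypothetical) and that the derived minimizers forward
// this (model, param) constructor:
//
//   NLModel1* model = new MyModel();
//   MinimizerParameterSet* param = new MinimizerParameterSet(...);
//   PRCGMinimizer minimizer(model, param);
//   minimizer.minimize();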

bool ConjugateMinimizerBase::isSolvable(){

  //conjugate gradient can only solve unconstrained nonlinear models
  if (!model->hasConstraints())
    return true;
  else
    return false;
}

void ConjugateMinimizerBase::printMinizerInfo(){

}
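
// Nonlinear conjugate gradient iteration, as implemented in minimize() below:
//   d_0     = -g_0
//   x_{k+1} = x_k + lambda_k * d_k        (lambda_k found by the line search)
//   d_{k+1} = -g_{k+1} + gamma_k * d_k    (gamma_k supplied by calcGamma)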

void ConjugateMinimizerBase::minimize(){
  int maxIteration;
  int nextResetIter;
  int resetFrq;
  int nextWriteIter;
  int writeFrq;
  int lsStatus;
  double gamma;
  double lambda;

  if (!isSolvable()){
    cout << "ConjugateMinimizerBase Error: This nonlinear model cannot be solved by " << minimizerName << endl;
    exit(1);
  }

  printMinizerInfo();

  resetFrq = paramSet->getResetFrq();
  nextResetIter = resetFrq;

  writeFrq = paramSet->getWriteFrq();
  nextWriteIter = writeFrq;

  minX = model->getX();
  gradient = model->calcGrad();

  //the first search direction is the steepest descent direction, d0 = -g0
  for(size_t i = 0; i < direction.size(); i++)
    direction[i] = -gradient[i];

  maxIteration = paramSet->getMaxIteration();

  for(currentIter = 1; currentIter <= maxIteration; currentIter++){

    //perform a line search to minimize f(x + lambda * direction) with lambda > 0
    lsMinimizer->minimize(direction, 0.0, 1.0);

    lsStatus = lsMinimizer->getMinimizationStatus();

    // ...
    }

    prevMinX = minX;
    lambda = lsMinimizer->getMinVar();

    for(size_t i = 0; i < direction.size(); i++)
      minX[i] = minX[i] + lambda * direction[i];

    //calculate the gradient at the new point
    prevGrad = gradient;
    model->setX(minX);
    gradient = model->calcGrad();

    //stop if converged
    if (checkConvergence() > 0){
      writeOut(minX, currentIter);

      minStatus = MINSTATUS_CONVERGE;
      // ...
    }

    //calculate gamma for the next search direction
    gamma = calcGamma(gradient, prevGrad);

    //update the search direction: d_new = -g_new + gamma * d_old
    prevDirection = direction;

    for(size_t i = 0; i < direction.size(); i++)
      direction[i] = -gradient[i] + gamma * direction[i];

    //write out the intermediate result every writeFrq iterations
    if (currentIter == nextWriteIter){
      nextWriteIter += writeFrq;
      // ...
    }
  }

  // ...
}

int ConjugateMinimizerBase::checkConvergence(){
  // ...

  //test the absolute gradient tolerance
  if (sqrt(dotProduct(gradient, gradient)) < paramSet->getGTol())
    return 1;
  else
    return -1;
}
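
// Fletcher-Reeves coefficient: gamma_FR = (g_new . g_new) / (g_old . g_old).
// Worked example: for g_old = (1, 2) and g_new = (0.5, 1),
// gamma_FR = (0.25 + 1) / (1 + 4) = 0.25.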

double FRCGMinimizer::calcGamma(vector<double>& newGrad, vector<double>& oldGrad){
  return dotProduct(newGrad, newGrad) / dotProduct(oldGrad, oldGrad);
}

double PRCGMinimizer::calcGamma(vector<double>& newGrad, vector<double>& oldGrad){
  //Polak-Ribiere coefficient: gamma_PR = ((g_new - g_old) . g_new) / (g_old . g_old)
  vector<double> deltaGrad;

  for(size_t i = 0; i < newGrad.size(); i++)
    deltaGrad.push_back(newGrad[i] - oldGrad[i]);

  return dotProduct(deltaGrad, newGrad) / dotProduct(oldGrad, oldGrad);
}
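
// A common safeguard for Polak-Ribiere CG (not implemented here) is the "PR+"
// variant, which restarts along steepest descent whenever gamma turns negative.
// A minimal sketch of how it could be applied at the calcGamma call site:
//
//   gamma = std::max(calcGamma(gradient, prevGrad), 0.0);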