root/group/trunk/OOPSE/libmdtools/ConjugateMinimizer.cpp
Revision: 1064
Committed: Tue Feb 24 15:44:45 2004 UTC (21 years, 2 months ago) by tim
File size: 3985 byte(s)
Log Message:
Using inheritance instead of composition to implement Minimizer;
both versions are working

File Contents

#include "ConjugateMinimizer.hpp"
#include "Utility.hpp"
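// ConjugateMinimizerBase implements an unconstrained nonlinear conjugate
// gradient minimizer on top of the line-search machinery it inherits from
// MinimizerUsingLineSearch. The concrete variants FRCGMinimizer and
// PRCGMinimizer differ only in how they compute the mixing factor gamma
// used to build each new search direction.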
ConjugateMinimizerBase::ConjugateMinimizerBase(NLModel1* nlmodel, MinimizerParameterSet* param)
    : MinimizerUsingLineSearch(param){
  int dim;

  model = nlmodel;

  //set the dimension
#ifndef IS_MPI
  dim = model->getDim();
#else
  dim = model->getDim();
#endif

  prevGrad.resize(dim);
  gradient.resize(dim);
  prevDirection.resize(dim);
  direction.resize(dim);
}

bool ConjugateMinimizerBase::isSolvable(){

  //conjugate gradient can only solve unconstrained nonlinear models
  if (!model->hasConstraints())
    return true;
  else
    return false;
}

void ConjugateMinimizerBase::printMinizerInfo(){

}

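// Nonlinear conjugate gradient iteration:
//   1. line search along the current direction d to find lamda
//   2. step:      x <- x + lamda * d
//   3. gradient:  g <- grad f(x)
//   4. mixing:    gamma = calcGamma(g, g_prev)
//   5. direction: d <- -g + gamma * d
// The direction is periodically reset, and intermediate results are written
// out, at the frequencies given by the parameter set.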
void ConjugateMinimizerBase::minimize(){
  int maxIteration;
  int nextResetIter;
  int resetFrq;
  int nextWriteIter;
  int writeFrq;
  int lsStatus;
  double gamma;
  double lamda;

  if (!isSolvable()){
    cout << "ConjugateMinimizerBase Error: This nonlinear model cannot be solved by " << minimizerName << endl;
    exit(1);
  }

  printMinizerInfo();

  resetFrq = paramSet->getResetFrq();
  nextResetIter = resetFrq;

  writeFrq = paramSet->getWriteFrq();
  nextWriteIter = writeFrq;

  minX = model->getX();
  gradient = model->calcGrad();

  //initial search direction is steepest descent
  for(int i = 0; i < direction.size(); i++)
    direction[i] = -gradient[i];

  maxIteration = paramSet->getMaxIteration();

  for(currentIter = 1; currentIter <= maxIteration; currentIter++){

    // perform line search to minimize f(x + lamda * direction) where lamda > 0
    lsMinimizer->minimize(direction, 0.0, 0.01);

    lsStatus = lsMinimizer->getMinimizationStatus();

    lamda = lsMinimizer->getMinVar();

    if (lsStatus == MINSTATUS_ERROR){
      if (lamda == 0){
        //line search failed along the conjugate direction; fall back to
        //steepest descent along the previous gradient and try again
        for(int i = 0; i < direction.size(); i++)
          direction[i] = -prevGrad[i];

        continue;
      }
      minStatus = MINSTATUS_ERROR;
      return;
    }
    else{
      prevMinX = minX;
    }

    //take the step along the search direction
    for(int i = 0; i < direction.size(); i++)
      minX[i] = minX[i] + lamda * direction[i];

    //calculate the gradient at the new point
    prevGrad = gradient;

    model->setX(minX);
    gradient = model->calcGrad();

    minX = model->getX();

    // stop if converged
    if (checkConvergence() > 0){
      writeOut(minX, currentIter);

      minStatus = MINSTATUS_CONVERGE;
      return;
    }

    //calculate the mixing factor gamma
    gamma = calcGamma(gradient, prevGrad);

    // update the search direction
    prevDirection = direction;

    for(int i = 0; i < direction.size(); i++)
      direction[i] = -gradient[i] + gamma * direction[i];

    //write out intermediate results at the requested frequency
    if (currentIter == nextWriteIter){
      nextWriteIter += writeFrq;
      writeOut(minX, currentIter);
    }

    //periodically reset the search direction
    if (currentIter == nextResetIter){
      reset();
      nextResetIter += resetFrq;
    }

  }

  // if maxIteration is not a multiple of writeFrq, the final result still needs
  // to be written out; otherwise it was already written inside the loop
  if (currentIter - 1 != (nextWriteIter - writeFrq))
    writeOut(minX, currentIter);

  minStatus = MINSTATUS_MAXITER;
  return;
}

int ConjugateMinimizerBase::checkConvergence(){

  //test absolute gradient tolerance
  if (sqrt(dotProduct(gradient, gradient)) < paramSet->getGTol())
    return 1;
  else
    return -1;
}

void ConjugateMinimizerBase::reset(){

}

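// Fletcher-Reeves mixing factor:
//   gamma = (g_new . g_new) / (g_old . g_old)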
double FRCGMinimizer::calcGamma(vector<double>& newGrad, vector<double>& oldGrad){
  return dotProduct(newGrad, newGrad) / dotProduct(oldGrad, oldGrad);
}
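
// Polak-Ribiere mixing factor:
//   gamma = ((g_new - g_old) . g_new) / (g_old . g_old)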
double PRCGMinimizer::calcGamma(vector<double>& newGrad, vector<double>& oldGrad){
  vector<double> deltaGrad;

  for(int i = 0; i < newGrad.size(); i++)
    deltaGrad.push_back(newGrad[i] - oldGrad[i]);

  return dotProduct(deltaGrad, newGrad) / dotProduct(oldGrad, oldGrad);
}
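
A minimal driver sketch, assuming FRCGMinimizer and PRCGMinimizer accept the same (NLModel1*, MinimizerParameterSet*) constructor arguments as ConjugateMinimizerBase and that minimize() is publicly callable; the driver function below is hypothetical and not part of this file:

#include "ConjugateMinimizer.hpp"

void runCGMinimization(NLModel1* model, MinimizerParameterSet* params){
  // hypothetical driver: Polak-Ribiere variant; constructor signature assumed
  PRCGMinimizer minimizer(model, params);
  minimizer.minimize();
  // the convergence status and minimized coordinates are kept inside the
  // minimizer (minStatus / minX) and written out through writeOut()
}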

Properties

Name Value
svn:executable *