root/group/trunk/OOPSE/libmdtools/ConjugateMinimizer.cpp

Comparing trunk/OOPSE/libmdtools/ConjugateMinimizer.cpp (file contents):
Revision 1015 by tim, Tue Feb 3 22:54:52 2004 UTC vs.
Revision 1064 by tim, Tue Feb 24 15:44:45 2004 UTC

# Line 1 | Line 1
1   #include "ConjugateMinimizer.hpp"
2   #include "Utility.hpp"
3 + ConjugateMinimizerBase::ConjugateMinimizerBase(NLModel1* nlmodel, MinimizerParameterSet* param)
4 +                                : MinimizerUsingLineSearch(param){
5 +  int dim;
6 +  
7 +  model = nlmodel;
8 +  //set the dimension
9 +  
10 + #ifndef IS_MPI
11 +  dim = model->getDim();
12 + #else
13 +  dim = model->getDim();
14 + #endif
15 +  prevGrad.resize(dim);    
16 +  gradient.resize(dim);
17 +  prevDirection.resize(dim);
18 +  direction.resize(dim);
19 + }
20  
21   bool ConjugateMinimizerBase::isSolvable(){
22  
# Line 11 | Line 28 | bool ConjugateMinimizerBase::isSolvable(){
28      return false;
29   }
30  
14 void ConjugateMinimizerBase::Init(){
15
16 }
17
31   void ConjugateMinimizerBase::printMinizerInfo(){
32  
33   }
34  
35 < void ConjugateMinimizerBase::Minimize(){
35 > void ConjugateMinimizerBase::minimize(){
36    int maxIteration;
37    int nextResetIter;
38    int resetFrq;
# Line 43 | Line 56 | void ConjugateMinimizerBase::Minimize(){
56  
57    writeFrq = paramSet->getWriteFrq();
58    nextWriteIter = writeFrq;
46  
47  prevGrad = model->calcGrad();
59  
60 <  direction = prevGrad;
60 >  minX = model->getX();
61 >  gradient = model->calcGrad();
62 >
63 >  for(int i = 0; i < direction.size(); i++)
64 >    direction[i] = -gradient[i];
65    
66    maxIteration = paramSet->getMaxIteration();
67  
68 <  for(currentIter = 0;currentIter < maxIteration; currentIter++){
68 >  for(currentIter = 1;currentIter <= maxIteration; currentIter++){
69  
70 <    // perform line search to minimize f(x + stepSize * direction) where stepSize > 0
71 <    lsMinimizer->minimize(direction, 0.0, 1.0);
70 >    // perform line search to minimize f(x + lamda * direction) where lamda > 0
71 >    lsMinimizer->minimize(direction, 0.0, 0.01);
72      
73      lsStatus = lsMinimizer->getMinimizationStatus();
74 +
75 +    lamda = lsMinimizer->getMinVar();
76      
77      if(lsStatus ==MINSTATUS_ERROR){
78 <      minStatus = MINSTATUS_ERROR;
78 >      if (lamda == 0){
79 >
80 >          for(int i = 0; i < direction.size(); i++)
81 >            direction[i] = -prevGrad[i];
82 >          
83 >        continue;
84 >      }
85 >        minStatus = MINSTATUS_ERROR;
86        return;
87      }
88 <    
89 <    prevMinX = minX;
90 <    lamda = lsMinimizer->getMinVar();
88 >    else{
89 >      prevMinX = minX;
90 >    }
91  
92      for(int i = 0; i < direction.size(); i++)
93        minX[i] = minX[i] + lamda * direction[i];
94  
95      //calculate the gradient
96      prevGrad = gradient;
97 <    
97 >
98 >    model->setX(minX);
99      gradient = model->calcGrad();
100  
101 +    minX = model->getX();
102      // stop if converge
103      if (checkConvergence() > 0){
104        writeOut(minX, currentIter);
# Line 89 | Line 115 | void ConjugateMinimizerBase::Minimize(){
115      prevDirection = direction;
116  
117      for(int i = 0; i < direction.size(); i++)  
118 <      direction[i] += gamma * direction[i];
118 >      direction[i] = -gradient[i] + gamma * direction[i];
119  
120      //
121      if (currentIter == nextWriteIter){
# Line 106 | Line 132 | void ConjugateMinimizerBase::Minimize(){
132  
133   // if maxIteration is not a multiple of writeFrq, we need to write the final result;
134   // otherwise, we already wrote it inside the loop, so we can skip it here
135 <  if(currentIter != (nextWriteIter - writeFrq))
135 >  if(currentIter - 1 != (nextWriteIter - writeFrq))
136      writeOut(minX, currentIter);
137  
138    minStatus = MINSTATUS_MAXITER;
# Line 117 | Line 143 | int ConjugateMinimizerBase::checkConvergence(){
143  
144    //test absolute gradient tolerance
145    
146 <  if (sqrt(dot(gradient, gradient)) < paramSet->getGradTol())
146 >  if (sqrt(dotProduct(gradient, gradient)) < paramSet->getGTol())
147      return 1;
148    else
149      return -1;
# Line 128 | Line 154 | double FRCGMinimizer::calcGamma(vector<double>& newGra
154   }
155  
156   double FRCGMinimizer::calcGamma(vector<double>& newGrad, vector<double>& oldGrad){
157 <  return dot(newGrad, newGrad) / dot(oldGrad, newGrad);
157 >  return dotProduct(newGrad, newGrad) / dotProduct(oldGrad, newGrad);
158   }
159  
160   double PRCGMinimizer::calcGamma(vector<double>& newGrad, vector<double>& oldGrad){
# Line 138 | Line 164 | double PRCGMinimizer::calcGamma(vector<double>& newGra
164    for(int i = 0; i < newGrad.size(); i++)
165      deltaGrad.push_back(newGrad[i] - oldGrad[i]);
166  
167 <  return dot(deltaGrad, newGrad) / dot(oldGrad, oldGrad);
167 >  return dotProduct(deltaGrad, newGrad) / dotProduct(oldGrad, oldGrad);
168    
169   }
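
For reference, the search-direction update that the revised minimize() applies each iteration, together with the two calcGamma formulas above, can be written compactly. In the LaTeX below, g_k and d_k (the gradient and search direction at iteration k) are editorial notation, not identifiers from the code:

  d_{k+1} = -g_{k+1} + \gamma_k \, d_k

  \gamma_k^{FR} = \frac{g_{k+1}^T g_{k+1}}{g_k^T g_{k+1}} \quad \text{(as coded in FRCGMinimizer; the textbook Fletcher--Reeves denominator is } g_k^T g_k\text{)}

  \gamma_k^{PR} = \frac{(g_{k+1} - g_k)^T g_{k+1}}{g_k^T g_k} \quad \text{(the standard Polak--Ribi\`ere formula, matching PRCGMinimizer)}

Iteration stops once \sqrt{g_{k+1}^T g_{k+1}} < \text{gTol}, which is what checkConvergence() tests after the dot/getGradTol renames to dotProduct/getGTol.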

Diff Legend

Removed lines
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)
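
As a self-contained illustration of the control flow the revised minimize() follows (line search, steepest-descent restart when the search fails with lamda == 0, Polak-Ribiere direction update, gradient-norm convergence test), here is a minimal C++ sketch. It is not OOPSE code: the quadratic objective, calcGrad, and the backtracking lineSearch below are hypothetical stand-ins for NLModel1 and lsMinimizer.

// Minimal nonlinear conjugate gradient sketch (Polak-Ribiere variant).
// Hypothetical stand-ins: objective/calcGrad model a trivial quadratic;
// lineSearch is a naive backtracking search, not OOPSE's lsMinimizer.
#include <cmath>
#include <cstdio>
#include <vector>

using std::vector;

static double dotProduct(const vector<double>& a, const vector<double>& b) {
  double s = 0.0;
  for (size_t i = 0; i < a.size(); i++) s += a[i] * b[i];
  return s;
}

// f(x) = sum of x_i^2 : a trivial convex test objective (assumption, not NLModel1)
static double objective(const vector<double>& x) {
  return dotProduct(x, x);
}

static vector<double> calcGrad(const vector<double>& x) {
  vector<double> g(x.size());
  for (size_t i = 0; i < x.size(); i++) g[i] = 2.0 * x[i];
  return g;
}

// naive backtracking line search along direction d, starting from step
static double lineSearch(const vector<double>& x, const vector<double>& d,
                         double step) {
  double f0 = objective(x);
  vector<double> trial(x.size());
  for (int iter = 0; iter < 50; iter++) {
    for (size_t i = 0; i < x.size(); i++) trial[i] = x[i] + step * d[i];
    if (objective(trial) < f0) return step;  // accept first improving step
    step *= 0.5;                             // otherwise shrink and retry
  }
  return 0.0;  // signal failure, analogous to lamda == 0 in minimize()
}

int main() {
  vector<double> x = {1.0, -2.0, 3.0};
  vector<double> g = calcGrad(x);
  vector<double> d(x.size());
  for (size_t i = 0; i < d.size(); i++) d[i] = -g[i];  // steepest-descent start

  const double gTol = 1e-8;
  const int maxIteration = 100;

  for (int k = 1; k <= maxIteration; k++) {
    double lamda = lineSearch(x, d, 0.01);
    if (lamda == 0.0) {  // restart from steepest descent on failure
      for (size_t i = 0; i < d.size(); i++) d[i] = -g[i];
      continue;
    }
    for (size_t i = 0; i < x.size(); i++) x[i] += lamda * d[i];

    vector<double> gNew = calcGrad(x);
    if (std::sqrt(dotProduct(gNew, gNew)) < gTol) break;  // converged

    // Polak-Ribiere gamma, matching PRCGMinimizer::calcGamma above
    vector<double> dg(gNew.size());
    for (size_t i = 0; i < gNew.size(); i++) dg[i] = gNew[i] - g[i];
    double gamma = dotProduct(dg, gNew) / dotProduct(g, g);

    for (size_t i = 0; i < d.size(); i++) d[i] = -gNew[i] + gamma * d[i];
    g = gNew;
  }
  printf("x = (%g, %g, %g)\n", x[0], x[1], x[2]);
  return 0;
}

The restart branch mirrors the new error handling in this revision: when the line search returns lamda == 0, the sketch falls back to the raw negative gradient instead of aborting with MINSTATUS_ERROR.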