core/vnl/algo/vnl_lbfgs.h
// This is core/vnl/algo/vnl_lbfgs.h
#ifndef vnl_lbfgs_h_
#define vnl_lbfgs_h_
#ifdef VCL_NEEDS_PRAGMA_INTERFACE
#pragma interface
#endif
//:
// \file
// \brief Limited memory Broyden-Fletcher-Goldfarb-Shanno minimization
// \author Andrew W. Fitzgibbon, Oxford RRG
// \date   22 Aug 99
//
// \verbatim
// Modifications
//  990822 AWF Initial version.
//  dac (Manchester) 28/03/2001: tidied up documentation
//  scottim 4/02/2002: Added a better description
// \endverbatim
//

#include <vnl/vnl_cost_function.h>
#include <vnl/vnl_nonlinear_minimizer.h>

//: Limited memory Broyden-Fletcher-Goldfarb-Shanno minimization
// Considered to be the best optimisation algorithm for functions
// which are well behaved (i.e. locally smooth, without too many
// local minima), have 1st derivatives available, and do not have a
// structure that makes them suitable for alternative methods
// (e.g. if f(x) is a sum of squared terms you should use
// vnl_levenberg_marquardt).
//
// This limited-memory rank-2 quasi-Newton method
// maintains an estimate of (the inverse of) the Hessian matrix of f at x.
// Unlike Newton's method, it doesn't need 2nd derivatives of f(x),
// has superlinear rather than quadratic convergence, and is
// better behaved away from minima. A rank-2 update of this matrix is
// performed at each step. To reduce memory and time requirements, this
// limited-memory version of BFGS maintains only a fixed number of vector
// corrections to a diagonal approximation of the inverse Hessian.
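//
// A minimal usage sketch (my_cost_fn and x0 are illustrative names, not
// part of this header; any subclass of vnl_cost_function that implements
// f(), and ideally gradf(), can be used in the same way):
// \code
//   my_cost_fn f;                 // derives from vnl_cost_function
//   vnl_lbfgs minimizer(f);
//   minimizer.memory = 5;         // number of stored correction vectors
//   vnl_vector<double> x = x0;    // starting point
//   if (minimizer.minimize(x))
//   {
//     // on success, x holds the estimated minimizer of f
//   }
// \endcode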

class vnl_lbfgs : public vnl_nonlinear_minimizer
{
 public:
  vnl_lbfgs();
  vnl_lbfgs(vnl_cost_function& f);

  bool minimize(vnl_vector<double>& x);

  //: Step accuracy/speed tradeoff.
  // Effectively the number of correction vectors to the diagonal
  // approximation of the inverse Hessian that are kept.
  //
  // Large values of memory will result in excessive computing time.
  // 3 <= memory <= 7 is recommended.
  // Memory requirements will be roughly Const + sizeof(element)*dim(X)*memory.
  // Default is 5.
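  // For example (illustrative numbers only): with the formula above,
  // dim(X) = 1e6 double-precision parameters and memory = 5 amount to
  // roughly 8 * 1e6 * 5 = 40 MB of correction-vector storage.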
  int memory;

  //: Accuracy of line search.
  // If function evaluations are cheap relative to the minimization steps
  // themselves, change this to 0.1 from the default of 0.9.
  double line_search_accuracy;

  //: Default step length in line search.
  // If, on tracing, the step length STP is always 1, try setting this to
  // a higher value to see how far along the gradient the minimum typically
  // lies. Then set this to a value just below that, to get maximally far
  // with a single evaluation.
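  // For example (hypothetical trace values): if the line search usually
  // settles near STP = 4, setting default_step_length to about 3.5 would
  // cover most of that distance in one function evaluation.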
  double default_step_length;

 private:
  void init_parameters();
  vnl_cost_function* f_;
  // Note: the default constructor does make sense; it allows parameter
  // values to be set before a cost function is supplied.
};

#endif // vnl_lbfgs_h_