// This is core/vnl/algo/vnl_levenberg_marquardt.h
#ifndef vnl_levenberg_marquardt_h_
#define vnl_levenberg_marquardt_h_
#ifdef VCL_NEEDS_PRAGMA_INTERFACE
#pragma interface
#endif
//:
// \file
// \brief Levenberg-Marquardt nonlinear least squares
// \author Andrew W. Fitzgibbon, Oxford RRG
// \date   31 Aug 96
//
// \verbatim
// Modifications
//  AGAP 160701 Some comments. Changed minimize to call the correct minimization
//              routine.
//  RWMC 001097 Added verbose flag to get rid of all that blathering.
//  AWF  151197 Added trace flag to increase blather.
//   Feb.2002 - Peter Vanroose - brief doxygen comment placed on single line
// \endverbatim
//

#include <vcl_iosfwd.h>
#include <vnl/vnl_vector.h>
#include <vnl/vnl_vector_fixed.h>
#include <vnl/vnl_matrix.h>
#include <vnl/vnl_nonlinear_minimizer.h>

class vnl_least_squares_function;

//: Levenberg-Marquardt nonlinear least squares
//  vnl_levenberg_marquardt is an interface to the MINPACK routine lmdif,
//  and implements Levenberg-Marquardt nonlinear fitting.  The function
//  to be minimized is passed as a vnl_least_squares_function object, which
//  may or may not provide derivatives.  If derivatives are not supplied,
//  they are calculated by forward differencing, which costs one function
//  evaluation per dimension and is accurate enough for most problems
//  (see Hartley in ``Applications of Invariance in Computer Vision''
//  for example).
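//
//  As a sketch, a cost function with an analytic Jacobian might look like
//  the following (the class name, data members, and model are illustrative,
//  not part of the library; it needs vnl/vnl_least_squares_function.h and
//  vcl_cmath.h):
//  \code
//    // residuals of the model y = x[0] * exp(x[1] * t) against data (t_i, y_i)
//    class my_residuals : public vnl_least_squares_function
//    {
//     public:
//      my_residuals(vnl_vector<double> const& t, vnl_vector<double> const& y)
//        : vnl_least_squares_function(2, t.size(), use_gradient),
//          t_(t), y_(y) {}
//
//      void f(vnl_vector<double> const& x, vnl_vector<double>& fx)
//      {
//        for (unsigned i = 0; i < t_.size(); ++i)
//          fx[i] = x[0] * vcl_exp(x[1] * t_[i]) - y_[i];
//      }
//
//      // d(residual i)/d(x[j]).  Omit this method and pass no_gradient
//      // above to fall back on forward differencing.
//      void gradf(vnl_vector<double> const& x, vnl_matrix<double>& J)
//      {
//        for (unsigned i = 0; i < t_.size(); ++i) {
//          J(i,0) = vcl_exp(x[1] * t_[i]);
//          J(i,1) = x[0] * t_[i] * vcl_exp(x[1] * t_[i]);
//        }
//      }
//
//     private:
//      vnl_vector<double> t_, y_;
//    };
//  \endcode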
class vnl_levenberg_marquardt : public vnl_nonlinear_minimizer
{
 public:

  //: Initialize with the function object that is to be minimized.
  vnl_levenberg_marquardt(vnl_least_squares_function& f) { init(&f); }

#if 0
  //: Initialize as above, and then run minimization.
  //
  // Obsolete: calling a virtual function such as minimize() from a
  // constructor is unreliable, because the call cannot reach overrides
  // in classes derived from this one.  For example, if minimize() calls
  // get_covariance(), it will never call a version overridden in a
  // subclass of vnl_levenberg_marquardt.  This is an argument against
  // doing computation in constructors.
  // You should replace code like
  // \code
  //    vnl_levenberg_marquardt lm(f, x);
  // \endcode
  // with
  // \code
  //    vnl_levenberg_marquardt lm(f);
  //    lm.minimize(x);
  // \endcode
  // or
  // \code
  //    x = vnl_levenberg_marquardt_minimize(f, x);
  // \endcode

  vnl_levenberg_marquardt(vnl_least_squares_function& f,
                          vnl_vector<double>& x)
  {
    init(&f);
    minimize(x);
  }
#endif

  ~vnl_levenberg_marquardt();

  //: Minimize the function supplied in the constructor until convergence or failure.
  //  On return, x is such that f(x) is the lowest value achieved.
  //  Returns true for convergence, false for failure.
  //  Does not use the gradient even if the cost function provides one.
  bool minimize_without_gradient(vnl_vector<double>& x);

  //: Minimize the function supplied in the constructor until convergence or failure.
  //  On return, x is such that f(x) is the lowest value achieved.
  //  Returns true for convergence, false for failure.
  //  The cost function must provide a gradient.
  bool minimize_using_gradient(vnl_vector<double>& x);

  //: Calls minimize_using_gradient() or minimize_without_gradient(),
  //  depending on whether the cost function provides a gradient.
  bool minimize(vnl_vector<double>& x);
  bool minimize(vnl_vector_fixed<double,1>& x) { vnl_vector<double> y=x.extract(1); bool b=minimize(y); x=y; return b; }
  bool minimize(vnl_vector_fixed<double,2>& x) { vnl_vector<double> y=x.extract(2); bool b=minimize(y); x=y; return b; }
  bool minimize(vnl_vector_fixed<double,3>& x) { vnl_vector<double> y=x.extract(3); bool b=minimize(y); x=y; return b; }
  bool minimize(vnl_vector_fixed<double,4>& x) { vnl_vector<double> y=x.extract(4); bool b=minimize(y); x=y; return b; }
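  // A minimal calling sequence, using the illustrative my_residuals cost
  // function sketched above (fn, t, y, and x0 are hypothetical names, not
  // library API):
  // \code
  //   my_residuals fn(t, y);
  //   vnl_levenberg_marquardt lm(fn);
  //   vnl_vector<double> x0(2); x0[0] = 1.0; x0[1] = 0.0;  // initial estimate
  //   if (!lm.minimize(x0))              // dispatches on fn.has_gradient()
  //     lm.diagnose_outcome(vcl_cerr);   // explain why it stopped
  // \endcode
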
  // Coping with failure-------------------------------------------------------

  //: Provide an ASCII diagnosis of the last minimization to vcl_cerr, or to the supplied vcl_ostream.
  void diagnose_outcome(/*vcl_cerr*/) const;
  void diagnose_outcome(vcl_ostream&) const;

  //: Return J'*J computed at the last minimum.
  //  This is an approximation to the inverse of the covariance of the estimated parameters.
  vnl_matrix<double> const& get_JtJ();
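
  // One way to turn this into an approximate parameter covariance, assuming
  // unit-variance residuals (vnl_svd is in vnl/algo/vnl_svd.h):
  // \code
  //   vnl_svd<double> svd(lm.get_JtJ());
  //   vnl_matrix<double> covar = svd.inverse();
  // \endcode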

 protected:

  vnl_least_squares_function* f_;
  vnl_matrix<double> fdjac_; // Computed during lmdif/lmder
  vnl_vector<long>   ipvt_;  // Also computed; both needed to get J'*J at the end.

  vnl_matrix<double> inv_covar_;
  bool set_covariance_; // Set if inv_covar_ holds J'*J

  void init(vnl_least_squares_function* f);

  // Communication with callback
  static void lmdif_lsqfun(long* m, long* n, double* x,
                           double* fx, long* iflag, void* userdata);
  static void lmder_lsqfun(long* m, long* n, double* x,
                           double* fx, double* fJ, long*, long* iflag,
                           void* userdata);
};

//: Find the minimum of "f", starting at "initial_estimate", and return it.
vnl_vector<double> vnl_levenberg_marquardt_minimize(vnl_least_squares_function& f,
                                                    vnl_vector<double> const& initial_estimate);
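//
// A convenient one-shot form of the class above, e.g. (fn and x0 as in the
// earlier sketches; both names are illustrative):
// \code
//   vnl_vector<double> x = vnl_levenberg_marquardt_minimize(fn, x0);
// \endcode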

#endif // vnl_levenberg_marquardt_h_