// This is core/vnl/vnl_nonlinear_minimizer.h
#ifndef vnl_nonlinear_minimizer_h_
#define vnl_nonlinear_minimizer_h_
#ifdef VCL_NEEDS_PRAGMA_INTERFACE
#pragma interface
#endif
//:
// \file
// \brief Base class for nonlinear optimization
// \author Andrew W. Fitzgibbon, Oxford RRG
// \date 22 Aug 1999
//
// \verbatim
//  Modifications
//   22 Mar 2001 - dac - added binary io and tidied documentation
//   Feb 2002 - Peter Vanroose - brief doxygen comment placed on single line
// \endverbatim

#include <vcl_string.h>
#include <vnl/vnl_matrix.h>

//: vnl_nonlinear_minimizer is a base class for nonlinear optimization.
// It defines a few common abilities such as get_num_evaluations.
// Known derived classes are:
// - vnl_levenberg_marquardt
// - vnl_lbfgs
// - vnl_conjugate_gradient
// - vnl_brent
// - vnl_powell
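//
// A minimal usage sketch (illustrative only): it drives the derived
// vnl_levenberg_marquardt minimizer with the tolerance setters and result
// accessors declared below.  The names my_residuals (a user-supplied
// vnl_least_squares_function) and initial_guess are hypothetical and not part
// of this header.
// \code
//   vnl_levenberg_marquardt lm(my_residuals);
//   lm.set_f_tolerance(1e-9);        // tolerance on the sum of squared residuals
//   lm.set_x_tolerance(1e-8);        // tolerance on the solution vector
//   lm.set_max_function_evals(1000);
//   vnl_vector<double> x = initial_guess;
//   lm.minimize(x);                  // x is updated in place
//   if (lm.obj_value_reduced())
//     vcl_cout << "error went from " << lm.get_start_error()
//              << " to " << lm.get_end_error()
//              << " in " << lm.get_num_iterations() << " iterations\n";
// \endcode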
class vnl_nonlinear_minimizer
{
 public:
  vnl_nonlinear_minimizer();

  virtual ~vnl_nonlinear_minimizer();

  //: Set the convergence tolerance on F (sum of squared residuals).
  // When the difference between successive RMS errors is less than this, the
  // routine terminates.  So this is effectively the desired precision of your
  // minimization.  Setting it too low wastes time; setting it too high may
  // cause premature convergence.  The default of 1e-9 is on the safe side,
  // but if speed is an issue, you can try raising it.
  void set_f_tolerance(double v) { ftol = v; }
  double get_f_tolerance() const { return ftol; }

  //: Set the convergence tolerance on X.
  // When the length of the steps taken in X is about this long, the routine
  // terminates.  The default is 1e-8, which should work for many problems,
  // but if you can get away with 1e-4, say, minimizations will be much quicker.
  void set_x_tolerance(double v) {
    xtol = v;
    epsfcn = xtol * 0.001;
  }
  double get_x_tolerance() const { return xtol; }

  //: Set the convergence tolerance on Grad(F)' * F.
  void set_g_tolerance(double v) { gtol = v; }
  double get_g_tolerance() const { return gtol; }

  //: Set the termination maximum number of function evaluations.
  void set_max_function_evals(int v) { maxfev = v; }
  int get_max_function_evals() const { return maxfev; }

  //: Set the step length for FD Jacobian.
  // Be aware that set_x_tolerance will reset this to xtol * 0.001.
  // The default is 1e-11.
  void set_epsilon_function(double v) { epsfcn = v; }
  double get_epsilon_function() const { return epsfcn; }

  //: Turn on per-iteration printouts.
  void set_trace(bool on) { trace = on; }
  bool get_trace() const { return trace; }

  //: Set verbose flag.
  void set_verbose(bool verb) { verbose_ = verb; }
  bool get_verbose() const { return verbose_; }

  //: Set check_derivatives flag.  Negative values may mean fewer checks.
  void set_check_derivatives(int cd) { check_derivatives_ = cd; }
  int get_check_derivatives() const { return check_derivatives_; }

  //: Return the error of the function when it was evaluated at the start point of the last minimization.
  // For minimizers driven by a vnl_least_squares_function (Levenberg-Marquardt)
  // this is usually the RMS error.
  // For those driven by a vnl_cost_function (CG, LBFGS, Amoeba) it is simply the
  // value of the vnl_cost_function at the start (usually the sum of squared residuals).
  double get_start_error() const { return start_error_; }

  //: Return the best error that was achieved by the last minimization, corresponding to the returned x.
  double get_end_error() const { return end_error_; }

  //: Return the total number of times the function was evaluated by the last minimization.
  int get_num_evaluations() const { return num_evaluations_; }

  //: Return the number of \e iterations in the last minimization.
  // Each iteration may have comprised several function evaluations.
  int get_num_iterations() const { return num_iterations_; }

  //: Some generic return codes that apply to all minimizers.
  enum ReturnCodes {
    ERROR_FAILURE              = -1,
    ERROR_DODGY_INPUT          =  0,
    CONVERGED_FTOL             =  1,
    CONVERGED_XTOL             =  2,
    CONVERGED_XFTOL            =  3,
    CONVERGED_GTOL             =  4,
    FAILED_TOO_MANY_ITERATIONS =  5,
    TOO_MANY_ITERATIONS = FAILED_TOO_MANY_ITERATIONS, // for backward-compatibility
    FAILED_FTOL_TOO_SMALL      =  6,
    FAILED_XTOL_TOO_SMALL      =  7,
    FAILED_GTOL_TOO_SMALL      =  8,
    FAILED_USER_REQUEST        =  9
  };

  //: Whether the error was reduced in the last minimization.
  bool obj_value_reduced() {
    return failure_code_ != ERROR_FAILURE
        && failure_code_ != ERROR_DODGY_INPUT
        && end_error_ < start_error_;
  }

  //: Return the covariance of the estimate at the end.
  virtual vnl_matrix<double> const& get_covariance();

  //: Return the name of the class.
  // Used by polymorphic IO.
  virtual vcl_string is_a() const;

  //: Return true if the name of the class matches the argument.
  // Used by polymorphic IO.
  virtual bool is_class(vcl_string const& s) const;

  //: Return the failure code of the last minimization.
  ReturnCodes get_failure_code() const { return failure_code_; }

 protected:
  // Data Members--------------------------------------------------------------
  // Input variables
  double xtol;    //!< Termination tolerance on X (solution vector)
  long maxfev;    //!< Termination maximum number of function evaluations
  double ftol;    //!< Termination tolerance on F (sum of squared residuals)
  double gtol;    //!< Termination tolerance on Grad(F)' * F = 0
  double epsfcn;  //!< Step length for FD Jacobian

  // Output variables
  unsigned num_iterations_;
  long num_evaluations_;
  double start_error_;
  double end_error_;

  bool trace;

  // Verbose flag.
  bool verbose_;
  int check_derivatives_;
  ReturnCodes failure_code_;

  void reset();

  //: Called by derived classes after each function evaluation.
  void report_eval(double f);

  //: Called by derived classes after each iteration.
  // When true is returned, the minimizer should stop with code FAILED_USER_REQUEST.
  // Derived classes can redefine this function to make the optimizer stop when a
  // condition is satisfied.
  virtual bool report_iter();
};

#endif // vnl_nonlinear_minimizer_h_
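
// An illustrative sketch (not part of this header) of the report_iter() hook:
// a derived minimizer calls report_iter() after each iteration, and returning
// true asks it to stop with the FAILED_USER_REQUEST code.  The my_lm name and
// the cap of 10 iterations are assumptions for the example, and it presumes
// the chosen derived class (here vnl_levenberg_marquardt) honours the hook.
//
//   #include <vnl/algo/vnl_levenberg_marquardt.h>
//
//   class my_lm : public vnl_levenberg_marquardt
//   {
//    public:
//     my_lm(vnl_least_squares_function& f) : vnl_levenberg_marquardt(f) {}
//     // Stop once more than 10 iterations have been reported.
//     bool report_iter() { return num_iterations_ > 10; }
//   };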