// core/vnl/algo/vnl_conjugate_gradient.cxx
// (Doxygen navigation text: "Go to the documentation of this file.")
00001 // This is core/vnl/algo/vnl_conjugate_gradient.cxx
00002 #ifdef VCL_NEEDS_PRAGMA_INTERFACE
00003 #pragma implementation
00004 #endif
00005 //:
00006 // \file
00007 // \author Geoffrey Cross, Oxford RRG
00008 // \date   15 Feb 99
00009 //
00010 //-----------------------------------------------------------------------------
00011 #include "vnl_conjugate_gradient.h"
00012 
00013 #include <vcl_iostream.h>
00014 
00015 #include <vnl/vnl_cost_function.h>
00016 #include <vnl/vnl_vector_ref.h>
00017 #include <vnl/algo/vnl_netlib.h>
00018 
00019 #if 0
00020 // external netlib function
00021 extern "C"
00022 int cg_( double *x,                     // IO start guess
00023          double *e,                     // O max-norm of gradient
00024          int    *it,                    // O number of iterations performed
00025          double *step,                  // I step=0 make guess at first direction
00026                                         // O step size along search direction for final iteration
00027          double *t,                     // I tolerance (iterations stop when max-norm of gradient < t)
00028          int *limit,                    // I maximum number of iterations
00029          int *n,                        // I number of unknowns
00030          int *m,                        // I number of iterations before renormalizing (normally m=n)
00031          double value( double *x),      // I value(x) is cost at x
00032          int grad( double *g,
00033                    double *x),          // I grad(g,x) puts gradient into g at x
00034          int both( double *v,
00035                    double *g,
00036                    double *x),          // I both(v,g,x) puts value in v and gradient in g at x
00037          int pre( double *y,
00038                   double *z),           // I preconditions (not necessarily needed) pre(y,z)
00039          double *h );                   // I space to work size h = 3*n
00040 #endif
00041 
00042 /////////////////////////////////////
00043 
//: Destructor.
// Nothing to release: the minimizer holds only a non-owning pointer to the
// cost function (f_) and plain scalar statistics.
vnl_conjugate_gradient::~vnl_conjugate_gradient()
{
}
00047 
00048 void vnl_conjugate_gradient::init(vnl_cost_function &f)
00049 {
00050   f_= &f;
00051   num_iterations_ = 0;
00052   num_evaluations_ = 0;
00053   start_error_ = 0;
00054   end_error_ = 0;
00055 }
00056 
00057 ///////////////////////////////////////
00058 
00059 double vnl_conjugate_gradient::valuecomputer_(double *x, void* userdata)
00060 {
00061   vnl_conjugate_gradient* self =
00062     static_cast<vnl_conjugate_gradient*>(userdata);
00063   vnl_cost_function* f = self->f_;
00064   vnl_vector_ref<double> ref_x(f->get_number_of_unknowns(), x);
00065 
00066   self->num_evaluations_++;
00067 
00068   return f->f(ref_x);
00069 }
00070 
00071 void vnl_conjugate_gradient::gradientcomputer_(double *g, double *x, void* userdata)
00072 {
00073   vnl_conjugate_gradient* self =
00074     static_cast<vnl_conjugate_gradient*>(userdata);
00075   vnl_cost_function* f = self->f_;
00076   vnl_vector_ref<double> ref_x(f->get_number_of_unknowns(), x);
00077   vnl_vector_ref<double> ref_g(f->get_number_of_unknowns(), g);
00078 
00079   f->gradf(ref_x, ref_g);
00080 }
00081 
00082 void vnl_conjugate_gradient::valueandgradientcomputer_(double *v, double *g, double *x, void* userdata)
00083 {
00084   vnl_conjugate_gradient* self =
00085     static_cast<vnl_conjugate_gradient*>(userdata);
00086   vnl_cost_function* f = self->f_;
00087   vnl_vector_ref<double> ref_x(f->get_number_of_unknowns(), x);
00088   vnl_vector_ref<double> ref_g(f->get_number_of_unknowns(), g);
00089 
00090   f->compute(ref_x, v, &ref_g);
00091 }
00092 
00093 void vnl_conjugate_gradient::preconditioner_( double *out, double *in, void* userdata)
00094 {
00095   // FIXME - there should be some way to set a preconditioner if you have one
00096   // e.g. P = inv(diag(A'A)) for linear least squares systems.
00097 
00098   vnl_conjugate_gradient* self =
00099     static_cast<vnl_conjugate_gradient*>(userdata);
00100   vnl_cost_function* f = self->f_;
00101 
00102   int n = f->get_number_of_unknowns();
00103   for (int i=0; i < n; ++i)
00104     out[i] = in[i];
00105 }
00106 
00107 ///////////////////////////////////////
00108 
// avoid anachronism warning from fussy compilers
// SunPro CC insists that function pointers passed to the "C" netlib routine
// themselves have "C" linkage, so each static member callback is wrapped in
// an extern "C" forwarding function.  Other compilers pass the static
// members directly (see minimize() below).
#ifdef VCL_SUNPRO_CC
extern "C" double vnl_conjugate_gradient__valuecomputer_( double *x, void* userdata)
{
  return vnl_conjugate_gradient::valuecomputer_(x, userdata);
}

extern "C" void vnl_conjugate_gradient__gradientcomputer_( double *g, double *x, void* userdata)
{
  vnl_conjugate_gradient::gradientcomputer_(g,x, userdata);
}

extern "C" void vnl_conjugate_gradient__valueandgradientcomputer_( double *v, double *g, double *x, void* userdata)
{
  vnl_conjugate_gradient::valueandgradientcomputer_(v,g,x, userdata);
}

extern "C" void vnl_conjugate_gradient__preconditioner_( double *out, double *in, void* userdata)
{
  vnl_conjugate_gradient::preconditioner_(out,in, userdata);
}

#endif
00132 
//: Minimize the cost function f_ by conjugate gradients, starting from
// (and overwriting) \a x.  Statistics (start_error_, end_error_,
// num_iterations_, num_evaluations_, final_step_size_) are filled in.
// \return true iff the netlib solver reported no error.
bool vnl_conjugate_gradient::minimize( vnl_vector<double> &x)
{
  double *xp = x.data_block();
  double max_norm_of_gradient;
  long number_of_iterations;
  final_step_size_ = 0;
  double gradient_tolerance = gtol;
  // The netlib cg_ routine needs 3*n doubles of scratch space.
  vnl_vector<double> workspace(f_->get_number_of_unknowns()*3);
  long number_of_unknowns = f_->get_number_of_unknowns();
  long error_code;

  // Compute the initial value.
  start_error_ = valuecomputer_(xp, this);
  // valuecomputer_ incremented the counter; reset so this bookkeeping
  // evaluation is not counted against the solve.
  num_evaluations_ = 0;

  // Run the conjugate gradient algorithm.
  v3p_netlib_cg_(
       xp,                      // in/out: start guess / solution
       &max_norm_of_gradient,   // out: max-norm of gradient at solution
       &number_of_iterations,   // out: iterations performed
       &final_step_size_,       // in 0 = guess first direction; out: last step size
       &gradient_tolerance,     // stop when max-norm of gradient < this
       &maxfev,                 // iteration limit
       &number_of_unknowns,
       &number_of_unknowns,     // renormalize every n iterations (m = n)
#ifdef VCL_SUNPRO_CC
       vnl_conjugate_gradient__valuecomputer_,
       vnl_conjugate_gradient__gradientcomputer_,
       vnl_conjugate_gradient__valueandgradientcomputer_,
       vnl_conjugate_gradient__preconditioner_,
#else
       valuecomputer_,
       gradientcomputer_,
       valueandgradientcomputer_,
       preconditioner_,
#endif
       workspace.data_block(),
       this,                    // userdata forwarded to each callback
       &error_code);

  // Check for an error condition.
  if (error_code > 0)
  {
    failure_code_ = ERROR_DODGY_INPUT;
    if (verbose_)
    {
      switch (error_code)
      {
        case 1:  vcl_cout << "UNABLE TO OBTAIN DESCENT DIRECTION\n"; break;
        case 2:  vcl_cout << "THE FUNCTION DECREASES WITH NO MINIMUM\n"; break;
        case 3:  vcl_cout << "PRECONDITIONER NOT POSITIVE DEFINITE\n"; break;
        case 4:  vcl_cout << "UNABLE TO SATISFY ARMIJO CONDITION\n"; break;
        default: vcl_cout << "UNKNOWN ERROR CODE\n"; break;
      }
    }
  }

  // Compute the final value.
  // NOTE(review): this evaluation IS counted in num_evaluations_.
  end_error_= valuecomputer_(xp, this);
  num_iterations_ = number_of_iterations;

  return error_code == 0;
}
00196 
00197 
00198 void vnl_conjugate_gradient::diagnose_outcome(vcl_ostream& os) const
00199 {
00200   os << "vnl_conjugate_gradient: "
00201      << num_iterations_
00202      << " iterations, "
00203      << num_evaluations_
00204      << " evaluations. Cost function reported error"
00205      << f_->reported_error(start_error_)
00206      << '/'
00207      << f_->reported_error(end_error_)
00208      << " . Final step size = " << final_step_size_
00209      << vcl_endl;
00210 }
00211 
//: Convenience overload: print the minimization summary to vcl_cout.
void vnl_conjugate_gradient::diagnose_outcome() const
{
  diagnose_outcome(vcl_cout);
}