// This is core/vnl/vnl_cost_function.cxx
#ifdef VCL_NEEDS_PRAGMA_INTERFACE
#pragma implementation
#endif
//:
// \file
// \author Andrew W. Fitzgibbon, Oxford RRG
// \date   23 Oct 1997
//
//-----------------------------------------------------------------------------

#include "vnl_cost_function.h"
#include <vcl_cassert.h>

// Guard used to detect mutual recursion between the default f()/gradf() and compute().
static bool f_calling_compute = false;

//: Default implementation of compute: delegate to f() for the value and gradf() for the gradient.
void vnl_cost_function::compute(vnl_vector<double> const& x, double *val, vnl_vector<double>* g)
{
  if (val) *val = this->f(x);
  if (g) this->gradf(x, *g);
}
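
// Example (not from the original file): a minimal sketch of a subclass that
// overrides compute() to evaluate the value and the gradient in one pass, so
// that the default f()/gradf() above can both delegate to it.  The class name
// ExampleRosenbrockCost and the constants are assumptions for this sketch; it
// also assumes the vnl_cost_function(int number_of_unknowns) constructor
// declared in vnl_cost_function.h.
// \code
//   class ExampleRosenbrockCost : public vnl_cost_function
//   {
//    public:
//     ExampleRosenbrockCost() : vnl_cost_function(2) {}
//
//     void compute(vnl_vector<double> const& x, double* val, vnl_vector<double>* g)
//     {
//       double a = 1.0 - x[0];
//       double b = x[1] - x[0]*x[0];
//       if (val) *val = a*a + 100.0*b*b;           // f(x) = (1-x0)^2 + 100 (x1-x0^2)^2
//       if (g) {
//         (*g)[0] = -2.0*a - 400.0*b*x[0];
//         (*g)[1] = 200.0*b;
//       }
//     }
//   };
// \endcode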

//: Default implementation of f is to call compute().
double vnl_cost_function::f(vnl_vector<double> const& x)
{
  // If we get back here from compute(), neither virtual function was implemented.
  if (f_calling_compute)
    assert(!"vnl_cost_function: RECURSION");
  double val;
  f_calling_compute = true;
  this->compute(x, &val, 0);
  f_calling_compute = false;
  return val;
}

//: Default implementation of gradf is to call compute()
void vnl_cost_function::gradf(vnl_vector<double> const& x, vnl_vector<double>& g)
{
  if (f_calling_compute)
    assert(!"vnl_cost_function: RECURSION");
  f_calling_compute = true;
  this->compute(x, 0, &g);
  f_calling_compute = false;
}

//: Compute the gradient of f at x by central finite differences:
//  gradient[i] ~ (f(x + h*e_i) - f(x - h*e_i)) / (2*h), with e_i the i-th unit vector.
void vnl_cost_function::fdgradf(vnl_vector<double> const& x,
                                vnl_vector<double> &  gradient,
                                double stepsize )
{
  vnl_vector<double> tx = x;
  double h = stepsize;
  for (int i = 0; i < dim; ++i) {
    // Perturb coordinate i forwards and backwards by h, leaving the others untouched.
    double tplus = x[i] + h;
    tx[i] = tplus;
    double fplus = this->f(tx);

    double tminus = x[i] - h;
    tx[i] = tminus;
    double fminus = this->f(tx);

    // Divide by (tplus - tminus), the interval actually used, rather than by 2*h.
    gradient[i] = (fplus - fminus) / (tplus - tminus);
    tx[i] = x[i];
  }
}
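
// Example (not from the original file): a hedged sketch of checking an
// analytic gradient against fdgradf(), using the illustrative
// ExampleRosenbrockCost class from the comment above compute().  The
// starting point and the tolerance are assumptions for this sketch.
// \code
//   ExampleRosenbrockCost c;
//   vnl_vector<double> x(2);
//   x[0] = -1.2;  x[1] = 1.0;
//   vnl_vector<double> ga = c.gradf(x);    // analytic, via compute()
//   vnl_vector<double> gn = c.fdgradf(x);  // central finite differences
//   assert((ga - gn).inf_norm() < 1e-3);   // tolerance is an assumption
// \endcode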

//: Convenience overload: return the gradient of f at x as a new vector.
vnl_vector<double> vnl_cost_function::gradf(vnl_vector<double> const& x)
{
  vnl_vector<double> g(dim);
  this->gradf(x, g);
  return g;
}

//: Convenience overload: return the finite-difference gradient of f at x as a new vector.
vnl_vector<double> vnl_cost_function::fdgradf(vnl_vector<double> const& x)
{
  vnl_vector<double> g(dim);
  this->fdgradf(x, g);
  return g;
}
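
// Example (not from the original file): a sketch of a subclass that overrides
// only f().  With neither gradf() nor compute() overridden, calling gradf()
// would trip the RECURSION assertion above, so a numerical gradient is
// obtained with fdgradf() instead.  ExampleSumOfSquares and the chosen
// evaluation point are assumptions for this sketch.
// \code
//   class ExampleSumOfSquares : public vnl_cost_function
//   {
//    public:
//     ExampleSumOfSquares() : vnl_cost_function(3) {}
//
//     double f(vnl_vector<double> const& x)
//     {
//       return x.squared_magnitude();   // f(x) = sum_i x[i]^2
//     }
//   };
//
//   // Usage:
//   ExampleSumOfSquares s;
//   vnl_vector<double> x(3, 1.0);        // x = (1, 1, 1)
//   vnl_vector<double> g = s.fdgradf(x); // approximately (2, 2, 2)
// \endcode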