contrib/mul/clsfy/clsfy_smo_base.cxx
// This is mul/clsfy/clsfy_smo_base.cxx
#include "clsfy_smo_base.h"
//:
// \file
// \author Ian Scott
// \date 14-Nov-2001
// \brief Sequential Minimal Optimisation algorithm
// This code is based on the C++ code of
// Xianping Ge, ( http://www.ics.uci.edu/~xge ) which he kindly
// put in the public domain.
// That code was in turn based on the algorithms of
// John Platt, ( http://research.microsoft.com/~jplatt ) described in
// Platt, J. C. (1998). Fast Training of Support Vector Machines Using Sequential
// Minimal Optimisation. In Advances in Kernel Methods - Support Vector Learning.
// B. Scholkopf, C. Burges and A. Smola (eds.), MIT Press: 185-208, and other papers.

#include <vcl_cassert.h>

// ----------------------------------------------------------------

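//: Get the current value of the error term.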
double clsfy_smo_base::error()
{
  return error_;
}

// ----------------------------------------------------------------

//: Access the data point indexed by l
const vnl_vector<double> & clsfy_smo_base::data_point(unsigned long l)
{
  data_->set_index(l);
  return data_->current();
}

// ----------------------------------------------------------------

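//: Evaluate the learned function on training sample k:
//  f(k) = sum_i alph_[i] * target_[i] * kernel(i,k) - b_
// Samples with a zero Lagrange multiplier are skipped, since they cannot
// contribute to the sum.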
double clsfy_smo_base::learned_func(int k)
{
  double s = -b_;
  const unsigned long N = data_->size();
  for (unsigned int i=0; i<N; i++)
    if (alph_[i] > 0)
      s += alph_[i]*target_[i]*kernel(i,k);

  return s;
}

// ----------------------------------------------------------------

//: Get the optimised parameters
const vnl_vector<double>& clsfy_smo_base::lagrange_mults() const
{
  return alph_;
}

// ----------------------------------------------------------------

//: Set the initial values of the parameters to be optimised.
// The caller is responsible for ensuring that the initial values
// fulfill the constraints.
void clsfy_smo_base::set_lagrange_mults(const vnl_vector<double>& lagrange_mults)
{
  alph_ = lagrange_mults;
}

// ----------------------------------------------------------------

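//: Get the bias (threshold) term b of the learned function.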
double clsfy_smo_base::bias()
{
  return b_;
}

// ----------------------------------------------------------------

//: Reseeds the internal random number generator.
// To achieve quasi-random initialisation use:
// \code
// #include <vcl_ctime.h>
// ..
// sampler.reseed(vcl_time(0));
// \endcode
void clsfy_smo_base::reseed(unsigned long seed)
{
  rng_.reseed(seed);
}

// ----------------------------------------------------------------

//: Amount by which a sample can violate the KKT conditions
const double& clsfy_smo_base::tolerance() const
{
  return tolerance_;
}

// ----------------------------------------------------------------

//: Set the amount by which a sample can violate the KKT conditions.
// Default value is 0.001.
void clsfy_smo_base::set_tolerance(double tolerance)
{
  assert(tolerance >= 0.0);
  tolerance_ = tolerance;
}

// ----------------------------------------------------------------

//: Tolerance on several equalities.
// This includes testing whether a Lagrange multiplier is at one of the bounds.
double clsfy_smo_base::eps() const
{
  return eps_;
}

// ----------------------------------------------------------------

//: Set the tolerance on several equalities.
// This includes testing whether a Lagrange multiplier is at one of the bounds.
// Default value is 0.001.
void clsfy_smo_base::set_eps(double eps)
{
  assert(eps >= 0.0);
  eps_ = eps;
}

// ----------------------------------------------------------------

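//: Default constructor: no data, zero bias, tolerance and eps of 0.001,
// and a fixed seed for the random number generator.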
clsfy_smo_base::clsfy_smo_base():
  error_(0.0), data_(0), tolerance_(0.001), eps_(0.001), b_(0.0), rng_(9667566)
{
}

// ----------------------------------------------------------------

clsfy_smo_base::~clsfy_smo_base()
{
  delete data_;
}

// ----------------------------------------------------------------

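//: Fraction of training samples misclassified by the current learned function.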
double clsfy_smo_base::error_rate()
{
  int n_total = 0;
  int n_error = 0;
  for (unsigned int i=0; i<data_->size(); ++i) {
    if ((learned_func(i) > 0) != (target_[i] > 0)) // meaning: signs are different
      ++n_error;
    ++n_total;
  }
  return double(n_error)/double(n_total);
}