contrib/mul/mbl/mbl_lru_cache.h
// This is mul/mbl/mbl_lru_cache.h
#ifndef mbl_lru_cache_h_
#define mbl_lru_cache_h_
//:
// \file
// \brief Least recently used cache.
// \author Ian Scott

#include <vcl_list.h>
#include <vcl_map.h>
#include <vcl_utility.h>
#include <vcl_cassert.h>

//: Least recently used cache
// This cache is optimised for speed and is not very memory efficient.
// \param I is the index type
// \param V is the value type
template <class I, class V>
class mbl_lru_cache
{
  //: Allow least recently used item to be found quickly, $O(1)$.
  typedef vcl_list<I> list_type;
  //: Indices in most-recently-used-first order.
  list_type l_;

  //: Allow value to be looked up quickly, $O(\log(n))$, given index.
  typedef vcl_map<I, vcl_pair<V, typename list_type::iterator> > map_type;
  //: Map from index to value and to that index's position in l_.
  map_type m_;
  //: Limit of cache size.
  unsigned long n_;
public:

  //: Default constructor: a cache of size zero, which caches nothing.
  mbl_lru_cache(): n_(0) {}

  //: Create a cache of size n.
  // The actual memory size of the cache will be approximately
  // n * (sizeof(V) + 2*sizeof(I) + overhead(map element) + overhead(list element)).
  //
  // For many implementations overhead(list element) = 2 * sizeof(void *), and
  // overhead(map element) = 3 * sizeof(void *) + sizeof(enum).
  //
  // e.g. on a 32 bit computer, where I is a pair<unsigned, unsigned> and
  // V is a double, memory size = n * (8 + 16 + 16 + 8) = 48n bytes.
  // Only the 8 bytes of the value are payload, making the cache about 17%
  // space efficient - not very good.
  mbl_lru_cache(unsigned n): n_(n) {}

  //: Lookup index in the cache.
  // A successful lookup also marks the entry as the most recently used.
  // \return A pointer to the value if it is in the cache, or 0 if not.
  const V* lookup(const I& index)
  {
    assert (m_.size() == l_.size());
    typename map_type::iterator it = m_.find(index);
    if (it != m_.end())
    {
      // Move this index to the front of the recency list and update
      // the stored iterator to point at its new position.
      l_.push_front(index);
      l_.erase((*it).second.second);
      (*it).second.second = l_.begin();
      return &((*it).second.first);
    }
    else return 0;
  }

  //: Insert this value into the cache.
  // For speed this method assumes that the index isn't already in the cache.
  // \param index Index to access the value
  // \param value The value to be inserted in the cache
  // \param dont_if_full
  //     If true, and the cache is full, then the cache will not be modified.
  void insert(const I& index, const V& value, bool dont_if_full=false)
  {
    assert (m_.size() == l_.size());
    assert (m_.find(index) == m_.end());

    if (m_.size() < n_)
    {
      l_.push_front(index);
      m_[index] = vcl_make_pair(value, l_.begin());
    }
    else if (!dont_if_full)
    {
      l_.push_front(index);
      m_[index] = vcl_make_pair(value, l_.begin());
      // Evict the least recently used entry - the index at the back of the list.
      m_.erase(m_.find(l_.back()));
      l_.pop_back();
    }
  }

  //: Empty the cache.
  void clear() {l_.clear(); m_.clear();}

  //: True when the cache has reached its size limit.
  bool full() const { return m_.size() >= n_; }

  //: Change the size limit of the cache to n.
  // If this shrinks the cache, the least recently used entries are discarded.
  void resize(unsigned n)
  {
    while (m_.size() > n)
    {
      m_.erase(m_.find(l_.back()));
      l_.pop_back();
    }
    n_ = n;
  }
};

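// A minimal usage sketch - not part of the original header.  It illustrates
// the intended pattern of calling lookup() before insert(), since insert()
// assumes the index is absent from the cache.  The function names, the
// index/value types and the cache size below are illustrative assumptions,
// not part of the mbl API.

//: Placeholder for an expensive computation that is worth caching.
inline double mbl_lru_cache_example_compute(unsigned i)
{
  return i * 0.5;
}

//: Return the value for index i, computing it only on a cache miss.
inline double mbl_lru_cache_example(mbl_lru_cache<unsigned, double>& cache,
                                    unsigned i)
{
  // lookup() returns a pointer to the cached value (and marks that entry as
  // most recently used), or 0 if the index is not in the cache.
  if (const double* p = cache.lookup(i))
    return *p;

  // Cache miss: compute the value, then insert it.  lookup() has just
  // confirmed that the index is absent, as insert() requires.
  double v = mbl_lru_cache_example_compute(i);
  cache.insert(i, v);
  return v;
}

// The cache itself would be constructed elsewhere, e.g. with a limit of
// 1000 entries:
//   mbl_lru_cache<unsigned, double> cache(1000);
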
#endif // mbl_lru_cache_h_