Trim the cache from add() so that we maintain the cache size bound even for read workloads, which grow the cache on lookup misses but previously never triggered a trim.
Signed-off-by: Sage Weil <sage@redhat.com>
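For readers skimming the patch: below is a minimal, self-contained sketch of the trim-on-insert pattern this change introduces. The LruCache class, string keys/values, and unconditional eviction are illustrative stand-ins, not BlueStore's actual types; the real OnodeHashLRU keys on ghobject_t, stores OnodeRef values, and stops trimming when it reaches onodes that are still referenced.

  // A minimal sketch (hypothetical names throughout) of the pattern:
  // trim on every insert, with a locked public trim() wrapping an
  // unlocked private _trim() so lock-holding paths can reuse it.
  #include <cassert>
  #include <iostream>
  #include <list>
  #include <mutex>
  #include <string>
  #include <unordered_map>

  class LruCache {
   public:
    explicit LruCache(int max) : max_size(max) {}

    void add(const std::string& key, const std::string& value) {
      std::lock_guard<std::mutex> l(lock);
      assert(map.count(key) == 0);
      map[key] = value;
      lru.push_front(key);       // newest entries live at the front
      _trim(max_size);           // enforce the bound on every insert
    }

    // Public entry point: takes the lock; max = -1 means "use max_size".
    int trim(int max = -1) {
      std::lock_guard<std::mutex> l(lock);
      if (max < 0)
        max = max_size;
      return _trim(max);
    }

   private:
    // Internal variant: the caller must already hold `lock`.  The split
    // lets add() trim under its own lock guard without deadlocking on
    // the non-recursive std::mutex.
    int _trim(int max) {
      int trimmed = 0;
      int num = static_cast<int>(map.size()) - max;
      if (map.empty() || num <= 0)
        return 0;                // already within bounds
      while (num-- > 0) {
        map.erase(lru.back());   // evict the least recently used key
        lru.pop_back();
        ++trimmed;
      }
      return trimmed;
    }

    std::mutex lock;
    int max_size;
    std::list<std::string> lru;  // MRU at front, LRU at back
    std::unordered_map<std::string, std::string> map;
  };

  int main() {
    LruCache cache(2);
    cache.add("a", "1");
    cache.add("b", "2");
    cache.add("c", "3");         // triggers eviction of "a"
    std::cout << "trim removed " << cache.trim(1) << " entry\n";
  }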
assert(onode_map.count(oid) == 0);
onode_map[oid] = o;
lru.push_front(*o);
+ _trim(max_size);
}
BlueStore::OnodeRef BlueStore::OnodeHashLRU::lookup(const ghobject_t& oid)
int BlueStore::OnodeHashLRU::trim(int max)
{
std::lock_guard<std::mutex> l(lock);
- dout(20) << __func__ << " max " << max
- << " size " << onode_map.size() << dendl;
+ if (max < 0) {
+ max = max_size;
+ }
+ return _trim(max);
+}
+
+int BlueStore::OnodeHashLRU::_trim(int max)
+{
+ dout(20) << __func__ << " max " << max << " size " << onode_map.size() << dendl;
int trimmed = 0;
int num = onode_map.size() - max;
if (onode_map.size() == 0 || num <= 0)
  return 0;
void clear();
bool get_next(const ghobject_t& after, pair<ghobject_t,OnodeRef> *next);
int trim(int max=-1);
+ int _trim(int max);
};
struct Collection : public CollectionImpl {
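Usage note on the header change above: external callers keep calling trim(), where the new -1 default now means "trim down to max_size", while lock-holding internal paths such as add() call the private _trim() directly. With the hypothetical LruCache sketch above:

  cache.trim();   // trim down to the configured max_size
  cache.trim(0);  // evict every entry (every unreferenced one, in BlueStore's case)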