// Call deleter and free
+ // Evicted handles are collected on an intrusive singly-linked list
+ // (threaded through BinnedLRUHandle::next) instead of a temporary
+ // ceph::autovector, so no container allocation happens while mutex_ is held.
void BinnedLRUCacheShard::EraseUnRefEntries() {
- ceph::autovector<BinnedLRUHandle*> last_reference_list;
+ BinnedLRUHandle* deleted = nullptr;
{
std::lock_guard<std::mutex> l(mutex_);
while (lru_.next != &lru_) {
old->SetInCache(false);
Unref(old);
usage_ -= old->charge;
- last_reference_list.push_back(old);
+ // NOTE(review): assumes LRU_Remove() nulled old->next — the assert
+ // guards against chaining a handle that is still linked elsewhere.
+ ceph_assert(!old->next);
+ old->next = deleted;
+ deleted = old;
}
}
- for (auto entry : last_reference_list) {
- entry->Free();
- }
+ // Walk the chain and Free() each handle outside the mutex.
+ FreeDeleted(deleted);
}
void BinnedLRUCacheShard::ApplyToAllCacheEntries(
}
+ // `deleted` is now the head of an intrusive singly-linked list (linked via
+ // handle->next) passed by reference; evicted handles are pushed onto it so
+ // the caller can Free() them after releasing mutex_.
void BinnedLRUCacheShard::EvictFromLRU(size_t charge,
- ceph::autovector<BinnedLRUHandle*>* deleted) {
+ BinnedLRUHandle*& deleted) {
while (usage_ + charge > capacity_ && lru_.next != &lru_) {
BinnedLRUHandle* old = lru_.next;
ceph_assert(old->InCache());
old->SetInCache(false);
Unref(old);
usage_ -= old->charge;
- deleted->push_back(old);
+ // NOTE(review): assumes LRU_Remove() (not visible in this hunk) nulled
+ // old->next before we relink it into the deleted chain.
+ ceph_assert(!old->next);
+ old->next = deleted;
+ deleted = old;
}
}
void BinnedLRUCacheShard::SetCapacity(size_t capacity) {
- ceph::autovector<BinnedLRUHandle*> last_reference_list;
+ // Head of the intrusive list of handles evicted by the capacity change.
+ BinnedLRUHandle* deleted = nullptr;
{
std::lock_guard<std::mutex> l(mutex_);
capacity_ = capacity;
high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
- EvictFromLRU(0, &last_reference_list);
+ EvictFromLRU(0, deleted);
}
// we free the entries here outside of mutex for
// performance reasons
- for (auto entry : last_reference_list) {
- entry->Free();
- }
+ FreeDeleted(deleted);
}
void BinnedLRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
rocksdb::Cache::Handle** handle, rocksdb::Cache::Priority priority) {
auto e = new BinnedLRUHandle();
rocksdb::Status s;
- ceph::autovector<BinnedLRUHandle*> last_reference_list;
+ // Head of the intrusive list of handles to free after dropping mutex_.
+ BinnedLRUHandle* deleted = nullptr;
e->value = value;
e->deleter = deleter;
std::lock_guard<std::mutex> l(mutex_);
// Free the space following strict LRU policy until enough space
// is freed or the lru list is empty
- EvictFromLRU(charge, &last_reference_list);
+ EvictFromLRU(charge, deleted);
if (usage_ - lru_usage_ + charge > capacity_ &&
(strict_capacity_limit_ || handle == nullptr)) {
if (handle == nullptr) {
// Don't insert the entry but still return ok, as if the entry inserted
// into cache and get evicted immediately.
+ // NOTE(review): assumes BinnedLRUHandle's constructor zero-initializes
+ // next for the freshly new'ed handle — TODO confirm in the header.
+ ceph_assert(!e->next);
+ e->next = deleted;
+ deleted = e;
} else {
delete e;
*handle = nullptr;
// old is on LRU because it's in cache and its reference count
// was just 1 (Unref returned 0)
LRU_Remove(old);
+ // LRU_Remove() above has unlinked old, so next is clear for reuse
+ // as the deleted-chain link.
+ ceph_assert(!old->next);
+ old->next = deleted;
+ deleted = old;
}
}
if (handle == nullptr) {
// we free the entries here outside of mutex for
// performance reasons
- for (auto entry : last_reference_list) {
- entry->Free();
- }
+ FreeDeleted(deleted);
return s;
}
#include <boost/circular_buffer.hpp>
#include "ShardedCache.h"
-#include "common/autovector.h"
#include "common/dout.h"
#include "include/ceph_assert.h"
#include "common/ceph_context.h"
// to hold (usage_ + charge) is freed or the lru list is empty
// This function is not thread safe - it needs to be executed while
// holding the mutex_
- void EvictFromLRU(size_t charge, ceph::autovector<BinnedLRUHandle*>* deleted);
+ void EvictFromLRU(size_t charge, BinnedLRUHandle*& deleted);
+
+ // Free every handle on the intrusive singly-linked list headed by
+ // `deleted` (chained through BinnedLRUHandle::next). Callers invoke this
+ // after releasing mutex_ so the (potentially expensive) deleters run
+ // outside the lock.
+ void FreeDeleted(BinnedLRUHandle* deleted) {
+ while (deleted) {
+ auto* entry = deleted;
+ // Read the next pointer before Free() destroys the current entry.
+ deleted = deleted->next;
+ entry->Free();
+ }
+ }
// Initialized before use.
size_t capacity_;