high_pri_pool_ratio_(high_pri_pool_ratio),
high_pri_pool_capacity_(0),
usage_(0),
- lru_usage_(0) {
+ lru_usage_(0),
+ erased_usage_(0) {
// Make empty circular linked list
lru_.next = &lru_;
lru_.prev = &lru_;
if (e->InHighPriPool()) {
assert(high_pri_pool_usage_ >= e->charge);
high_pri_pool_usage_ -= e->charge;
+ e->SetInHighPriPool(false);
+ }
+ if (e->IsFlaggedForErasure()) {
+ assert(erased_usage_ >= e->charge);
+ erased_usage_ -= e->charge;
+ e->SetFlaggedForErasure(false);
}
}
void LRUCacheShard::LRU_Insert(LRUHandle* e) {
  assert(e->next == nullptr);
  assert(e->prev == nullptr);
-  if (high_pri_pool_ratio_ > 0 && (e->IsHighPri() || e->HasHit())) {
+  if (e->IsErased()) {
+    // Insert "e" at the tail (cold / eviction end) of the LRU list so it is
+    // reclaimed first, and count its charge under erased_usage_. The
+    // FlaggedForErasure bit records that the charge is in erased_usage_;
+    // LRU_Remove() uses it to decrement exactly once.
+    e->next = lru_.next;
+    e->prev = &lru_;
+    e->prev->next = e;
+    e->next->prev = e;
+    e->SetInHighPriPool(false);
+    e->SetFlaggedForErasure(true);
+    // NOTE(review): unconditionally pointing lru_low_pri_ at an entry on the
+    // cold end looks suspicious when other low-pri entries already exist --
+    // subsequent low-pri inserts would land behind them. Verify this is the
+    // intended low-pri boundary.
+    lru_low_pri_ = e;
+    erased_usage_ += e->charge;
+  } else if (high_pri_pool_ratio_ > 0 && (e->IsHighPri() || e->HasHit())) {
    // Inset "e" to head of LRU list.
    e->next = &lru_;
    e->prev = lru_.prev;
    e->prev->next = e;
    e->next->prev = e;
    e->SetInHighPriPool(true);
+    e->SetFlaggedForErasure(false);
    high_pri_pool_usage_ += e->charge;
    MaintainPoolSize();
  } else {
+    // NOTE(review): e->next/e->prev are asserted nullptr above but are
+    // dereferenced below without being assigned in this branch. The
+    // "e->next = lru_low_pri_->next; e->prev = lru_low_pri_;" context lines
+    // appear to have been elided from this hunk -- confirm against the full
+    // file.
    e->prev->next = e;
    e->next->prev = e;
    e->SetInHighPriPool(false);
+    e->SetFlaggedForErasure(false);
    lru_low_pri_ = e;
  }
  lru_usage_ += e->charge;
}
+// Move "e" to the cold (eviction) end of the LRU list and account for its
+// charge under erased_usage_. Caller must hold mutex_; "e" must currently be
+// linked into the LRU list (asserted below).
+void LRUCacheShard::LRU_Demote(LRUHandle* e) {
+  assert(e->next != nullptr);
+  assert(e->prev != nullptr);
+  // If "e" is the current low-pri boundary, move the boundary back one node
+  // before unlinking so lru_low_pri_ never dangles.
+  if (lru_low_pri_ == e) {
+    lru_low_pri_ = e->prev;
+  }
+  // Unlink "e" from its current position.
+  e->next->prev = e->prev;
+  e->prev->next = e->next;
+  // Relink "e" at the cold end (lru_.next is the next eviction candidate).
+  e->next = lru_.next;
+  e->prev = &lru_;
+  e->next->prev = e;
+  e->prev->next = e;
+
+  if (e->InHighPriPool()) {
+    assert(high_pri_pool_usage_ >= e->charge);
+    high_pri_pool_usage_ -= e->charge;
+    e->SetInHighPriPool(false);
+  }
+  // Count the charge in erased_usage_ exactly once. LRU_Remove() decrements
+  // erased_usage_ only for entries with the FlaggedForErasure bit set, so the
+  // bit must be set here too; guarding on IsErased() alone (as before) never
+  // set the bit and therefore leaked erased_usage_ on every later removal.
+  if (!e->IsFlaggedForErasure()) {
+    erased_usage_ += e->charge;
+    e->SetFlaggedForErasure(true);
+  }
+  e->SetErased(true);
+}
+
void LRUCacheShard::MaintainPoolSize() {
while (high_pri_pool_usage_ > high_pri_pool_capacity_) {
// Overflow last entry in high-pri pool to low-pri pool.
}
if (e->refs == 1 && e->InCache()) {
// The item is still in cache, and nobody else holds a reference to it
+
if (usage_ > capacity_ || force_erase) {
// the cache is full
// The LRU list must be empty since the cache is full
if (last_reference) {
e->Free();
}
+
return last_reference;
}
if (last_reference) {
usage_ -= e->charge;
}
+
if (last_reference && e->InCache()) {
LRU_Remove(e);
}
+
+ e->SetErased(true);
e->SetInCache(false);
}
}
if (last_reference) {
e->Free();
}
+
}
size_t LRUCacheShard::GetUsage() const {
+  // NOTE(review): as shown, GetUsage() returns high_pri_pool_usage_. The
+  // interior lines of this hunk look elided by the diff (GetUsage() is
+  // expected to return usage_ under mutex_, and this return likely belongs
+  // to GetHighPriPoolUsage()). Verify against the full file before merging.
  return high_pri_pool_usage_;
}
+// Returns the total charge of entries currently accounted as erased in this
+// shard (see erased_usage_). Takes mutex_, consistent with the other
+// usage accessors that read mutable shard state.
+size_t LRUCacheShard::GetErasedUsage() const {
+  MutexLock l(&mutex_);
+  return erased_usage_;
+}
+
std::string LRUCacheShard::GetPrintableOptions() const {
const int kBufferSize = 200;
char buffer[kBufferSize];
}
}
+// Returns the erased usage of the whole cache by aggregating the value
+// reported by every shard.
+size_t LRUCache::GetErasedUsage() const {
+  size_t total = 0;
+  for (int s = 0; s < num_shards_; s++) {
+    total += shards_[s].GetErasedUsage();
+  }
+  return total;
+}
+
+
std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
return NewLRUCache(cache_opts.capacity, cache_opts.num_shard_bits,
cache_opts.strict_capacity_limit,
// in_cache: whether this entry is referenced by the hash table.
// is_high_pri: whether this entry is high priority entry.
// in_high_pri_pool: whether this entry is in high-pri pool.
+ // is_erased: whether this entry is erased (but may still have references!)
+ // is_flagged_for_erasure: whether this entry is flagged for erasure
char flags;
uint32_t hash; // Hash of key(); used for fast sharding and comparisons
bool IsHighPri() { return flags & 2; }
bool InHighPriPool() { return flags & 4; }
bool HasHit() { return flags & 8; }
+  // Entry was erased (bit 0x10) but may still be referenced by clients.
+  bool IsErased() { return flags & 16; }
+  // Entry's charge is currently counted in erased_usage_ (bit 0x20);
+  // cleared again by LRU_Remove().
+  bool IsFlaggedForErasure() { return flags & 32; }
void SetInCache(bool in_cache) {
if (in_cache) {
void SetHit() { flags |= 8; }
+  // Set or clear the "erased" bit (0x10) in flags.
+  void SetErased(bool erased) {
+    const char kErasedBit = 16;
+    if (erased) {
+      flags |= kErasedBit;
+    } else {
+      flags &= ~kErasedBit;
+    }
+  }
+
+  // Set or clear the "flagged for erasure" bit (0x20) in flags.
+  void SetFlaggedForErasure(bool erased) {
+    const char kFlaggedBit = 32;
+    if (erased) {
+      flags |= kFlaggedBit;
+    } else {
+      flags &= ~kFlaggedBit;
+    }
+  }
+
void Free() {
assert((refs == 1 && InCache()) || (refs == 0 && !InCache()));
if (deleter) {
virtual size_t GetHighPriPoolUsage() const;
virtual double GetHighPriPoolRatio() const;
+ virtual size_t GetErasedUsage() const;
+
private:
void LRU_Remove(LRUHandle* e);
void LRU_Insert(LRUHandle* e);
+ void LRU_Demote(LRUHandle* e);
// Overflow the last entry in high-pri pool to low-pri pool until size of
// high-pri pool is no larger than the size specify by high_pri_pool_pct.
// Memory size for entries residing only in the LRU list
size_t lru_usage_;
+ // Memory size for entries that will be erased
+ size_t erased_usage_;
+
// mutex_ protects the following state.
// We don't count mutex_ as the cache's internal state so semantically we
// don't mind mutex_ invoking the non-const actions.
virtual size_t GetHighPriPoolUsage() const override;
virtual double GetHighPriPoolRatio() const override;
virtual void SetHighPriPoolRatio(double high_pri_pool_ratio) override;
+
+ virtual size_t GetErasedUsage() const override;
// Retrieves number of elements in LRU, for unit test purpose only
size_t TEST_GetLRUSize();