size_t charge,
void (*deleter)(const rocksdb::Slice& key, void* value),
rocksdb::Cache::Handle** handle, rocksdb::Cache::Priority priority) {
- // Allocate the memory here outside of the mutex
- // If the cache is full, we'll have to release it
- // It shouldn't happen very often though.
- BinnedLRUHandle* e = reinterpret_cast<BinnedLRUHandle*>(
- new char[sizeof(BinnedLRUHandle) - 1 + key.size()]);
+ auto e = new BinnedLRUHandle();
rocksdb::Status s;
ceph::autovector<BinnedLRUHandle*> last_reference_list;
e->deleter = deleter;
e->charge = charge;
e->key_length = key.size();
+ e->key_data = new char[e->key_length];
e->flags = 0;
e->hash = hash;
e->refs = (handle == nullptr ? 1 : 2);  // one ref held by the cache, one for the returned handle
e->next = e->prev = nullptr;
e->SetInCache(true);
e->SetPriority(priority);
- memcpy(e->key_data, key.data(), key.size());
+ std::copy_n(key.data(), e->key_length, e->key_data);
{
std::lock_guard<std::mutex> l(mutex_);
// Don't insert the entry but still return ok, as if the entry inserted
// into cache and get evicted immediately.
last_reference_list.push_back(e);
} else {
- delete[] reinterpret_cast<char*>(e);
+ delete[] e->key_data;
+ delete e;
*handle = nullptr;
s = rocksdb::Status::Incomplete("Insert failed due to LRU cache being full.");
}
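
The old code packed the handle and its key into one raw char allocation, using the `key_data[1]` flexible-array trick; the replacement gives `BinnedLRUHandle` an ordinary `new` plus a separately owned key buffer, copied with `std::copy_n` (which lives in `<algorithm>`). The sketch below contrasts the two layouts; `InlineHandle` and `SplitHandle` are hypothetical stand-ins for illustration, not the actual Ceph types.

    #include <algorithm>
    #include <cstddef>
    #include <cstring>
    #include <string>

    // Old layout: one over-sized allocation; the key bytes live inline after
    // the struct. No constructor ever runs (the memory comes from new char[]
    // and is reinterpret_cast), so the struct must stay trivial, and teardown
    // must be a delete[] on the original char buffer.
    struct InlineHandle {
      std::size_t key_length;
      char key_data[1];  // actually key_length bytes long
    };

    InlineHandle* make_inline(const std::string& key) {
      auto* h = reinterpret_cast<InlineHandle*>(
          new char[sizeof(InlineHandle) - 1 + key.size()]);
      h->key_length = key.size();
      std::memcpy(h->key_data, key.data(), key.size());
      return h;
    }

    void free_inline(InlineHandle* h) {
      delete[] reinterpret_cast<char*>(h);  // must mirror the allocation
    }

    // New layout: two ordinary allocations. The struct can use constructors
    // and default member initializers, and is destroyed with plain delete.
    struct SplitHandle {
      std::size_t key_length = 0;
      char* key_data = nullptr;
    };

    SplitHandle* make_split(const std::string& key) {
      auto* h = new SplitHandle();
      h->key_length = key.size();
      h->key_data = new char[key.size()];
      std::copy_n(key.data(), key.size(), h->key_data);
      return h;
    }

    void free_split(SplitHandle* h) {
      delete[] h->key_data;  // both allocations must be released
      delete h;
    }

The price of the split is a second allocation per entry and one extra pointer of per-handle overhead; the gain is that the handle behaves like a normal C++ object, so default member initializers such as the `char* key_data = nullptr;` in the struct below take effect through `new BinnedLRUHandle()` instead of relying on manual bookkeeping.
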
uint32_t hash; // Hash of key(); used for fast sharding and comparisons
- char key_data[1]; // Beginning of key
+ char* key_data = nullptr; // Beginning of key
rocksdb::Slice key() const {
  // For cheaper lookups, we allow a temporary Handle object
  // to store a pointer to a key in "value".
  if (next == this) {
    return *(reinterpret_cast<rocksdb::Slice*>(value));
  } else {
    return rocksdb::Slice(key_data, key_length);
  }
}

void Free() {
  if (deleter) {
(*deleter)(key(), value);
}
- delete[] reinterpret_cast<char*>(this);
+ delete[] key_data;
+ delete this;
}
};
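
With ownership split across two allocations, every teardown path now has to release both: `Free()` deletes the key buffer before the handle, and the failed-insert branch in `Insert()` above does the same. If nothing required `BinnedLRUHandle` to remain destructor-free, the cleanup could be folded into the type itself; a minimal sketch, with the hypothetical name `OwningHandle` (this is not what the patch does):

    #include <cstddef>

    // Hypothetical variant: the handle owns its key buffer via a destructor,
    // so every teardown path collapses to a single `delete h;`.
    struct OwningHandle {
      std::size_t key_length = 0;
      char* key_data = nullptr;

      OwningHandle() = default;
      // Non-copyable: copying the raw pointer would lead to a double free.
      OwningHandle(const OwningHandle&) = delete;
      OwningHandle& operator=(const OwningHandle&) = delete;
      ~OwningHandle() { delete[] key_data; }
    };

Keeping the struct destructor-free and freeing `key_data` explicitly, as the patch does, preserves the existing convention that all teardown runs through `Free()` and keeps the handle as close as possible to the original RocksDB `LRUHandle` layout.
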