return c;
}
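+// Empty the shard entirely: zero caps make _trim() evict every onode and
+// buffer, and the asserts confirm the shard really drained.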
+void BlueStore::Cache::trim_all()
+{
+ std::lock_guard<std::recursive_mutex> l(lock);
+ _trim(0, 0);
+ assert(_get_num_onodes() == 0);
+ assert(_get_buffer_bytes() == 0);
+}
+
void BlueStore::Cache::trim(
  uint64_t target_bytes,
  float target_meta_ratio,
  float bytes_per_onode)
int max_skipped = g_conf->bluestore_cache_trim_max_skip_pinned;
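+ // Bound how many pinned onodes the loop below will step over before it
+ // gives up, so trim does not scan an LRU full of pinned entries forever.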
while (num > 0) {
Onode *o = &*p;
+ dout(20) << __func__ << " considering " << o << dendl;
int refs = o->nref.load();
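+ // nref > 1 means someone besides the cache holds this onode; it cannot
+ // be evicted, so it only counts against max_skipped.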
if (refs > 1) {
dout(20) << __func__ << " " << o->oid << " has " << refs
         << " refs, skipping" << dendl;
}
out_coll:
flush_cache();
- coll_map.clear();
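+ // flush_cache() now clears coll_map itself; see its definition below.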
out_alloc:
_close_alloc();
out_fm:
  _close_fm();
}
_reap_collections();
flush_cache();
- coll_map.clear();
dout(20) << __func__ << " closing" << dendl;
mounted = false;
r = bluefs->fsck();
if (r < 0) {
flush_cache();
- coll_map.clear();
goto out_alloc;
}
if (r > 0)
  errors += r;
out_scan:
flush_cache();
- coll_map.clear();
out_alloc:
_close_alloc();
out_fm:
  _close_fm();

void BlueStore::flush_cache()
{
  for (auto i : cache_shards) {
i->trim_all();
}
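+ // Every shard was just trimmed to nothing, so no collection may still
+ // hold cached onodes or shared blobs; check that before dropping the
+ // collection refs here (callers no longer clear coll_map themselves).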
+ for (auto& p : coll_map) {
+ assert(p.second->onode_map.empty());
+ assert(p.second->shared_blob_set.empty());
+ }
+ coll_map.clear();
}
// =========================================== BlueStore.h
void trim(uint64_t target_bytes, float target_meta_ratio,
float bytes_per_onode);
- void trim_all() {
- _trim(0, 0);
- }
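+  // Evict everything and assert the shard is empty; moved out of line so
+  // it can take the cache lock.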
+ void trim_all();
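+  // Trim down to at most onode_max onodes and buffer_max buffer bytes;
+  // each concrete cache type supplies the eviction policy.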
virtual void _trim(uint64_t onode_max, uint64_t buffer_max) = 0;