void BlueStore::Cache::trim_all()
{
std::lock_guard<std::recursive_mutex> l(lock);
_trim(0, 0);
- assert(_get_num_onodes() == 0);
- assert(_get_buffer_bytes() == 0);
}
void BlueStore::Cache::trim(
out_stop:
_kv_stop();
out_coll:
- flush_cache();
+ _flush_cache();
out_alloc:
_close_alloc();
out_fm:
dout(20) << __func__ << " stopping kv thread" << dendl;
_kv_stop();
_reap_collections();
- flush_cache();
+ _flush_cache();
dout(20) << __func__ << " closing" << dendl;
mounted = false;
out_scan:
mempool_thread.shutdown();
- flush_cache();
+ _flush_cache();
out_alloc:
_close_alloc();
out_fm:
}
-void BlueStore::flush_cache()
+void BlueStore::_flush_cache()
{
dout(10) << __func__ << dendl;
for (auto i : cache_shards) {
i->trim_all();
+ assert(i->empty());
}
for (auto& p : coll_map) {
assert(p.second->onode_map.empty());
}
coll_map.clear();
}
+// For external callers.
+// Here we use a best-effort policy instead: we don't care if some pinned
+// onodes/data are still left in the cache after this call completes.
+void BlueStore::flush_cache()
+{
+ dout(10) << __func__ << dendl;
+ for (auto i : cache_shards) {
+ i->trim_all();
+ }
+}
+
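// ---------------------------------------------------------------------------
// Illustration only, not part of the patch: the split above amounts to one
// strict flush for internal teardown paths (umount/fsck), which must leave
// every shard empty, and one best-effort flush for external callers, which
// tolerates pinned onodes/data. Below is a minimal self-contained sketch of
// that pattern; ToyShard/ToyStore are hypothetical stand-ins, not the real
// BlueStore::Cache / BlueStore interfaces.
#include <cassert>
#include <cstdint>
#include <mutex>

struct ToyShard {
  std::recursive_mutex lock;
  uint64_t num_onodes = 0;   // pretend bookkeeping
  uint64_t buffer_bytes = 0;
  uint64_t pinned = 0;       // entries that trimming cannot evict

  void trim_all() {
    std::lock_guard<std::recursive_mutex> l(lock);
    num_onodes = pinned;     // evict everything except pinned entries
    buffer_bytes = 0;
  }
  bool empty() {
    std::lock_guard<std::recursive_mutex> l(lock);
    return num_onodes == 0 && buffer_bytes == 0;
  }
};

struct ToyStore {
  ToyShard shards[4];

  // Internal flush: only used on teardown paths where nothing may be pinned,
  // so an empty cache is an invariant worth asserting.
  void _flush_cache() {
    for (auto& s : shards) {
      s.trim_all();
      assert(s.empty());
    }
  }
  // External flush: best effort; pinned entries may legitimately remain.
  void flush_cache() {
    for (auto& s : shards) {
      s.trim_all();
    }
  }
};

int main() {
  ToyStore store;
  store.shards[0].num_onodes = 10;
  store.shards[0].pinned = 2;   // e.g. onodes still referenced by in-flight I/O
  store.flush_cache();          // best effort: the two pinned onodes survive
  store.shards[0].pinned = 0;   // once nothing is pinned anymore...
  store._flush_cache();         // ...the strict teardown flush asserts emptiness
  return 0;
}
// ---------------------------------------------------------------------------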
void BlueStore::_apply_padding(uint64_t head_pad,
uint64_t tail_pad,
bufferlist& bl)

virtual void add_stats(uint64_t *onodes, uint64_t *buffers,
uint64_t *bytes) = 0;
+ bool empty() {
+ std::lock_guard<std::recursive_mutex> l(lock);
+ return _get_num_onodes() == 0 && _get_buffer_bytes() == 0;
+ }
+
#ifdef DEBUG_CACHE
virtual void _audit(const char *s) = 0;
#else
void get_db_statistics(Formatter *f) override;
void generate_db_histogram(Formatter *f) override;
+ void _flush_cache();
void flush_cache() override;
void dump_perf_counters(Formatter *f) override {
f->open_object_section("perf_counters");