From 7260166da20bfe31469498e55265b40a12f80652 Mon Sep 17 00:00:00 2001
From: Sage Weil <sage@redhat.com>
Date: Mon, 10 Oct 2016 15:59:51 -0400
Subject: [PATCH] os/bluestore: use std::unordered_map for SharedBlob lookup

Many blobs aren't shared. Save 8 bytes per SharedBlob by using a normal
unordered_map instead of intrusive::set.

More importantly, perhaps, it avoids us having to tune the intrusive
unordered_set size manually. std::unordered_map does this automatically
for you, but the intrusive one does not. And it's unclear how to
statically size it given that it's a per-collection structure and we
have no idea how many objects we'll have, how many blobs per object,
and how many objects will be cloned.

Signed-off-by: Sage Weil <sage@redhat.com>
---
 src/os/bluestore/BlueStore.cc                |  5 ---
 src/os/bluestore/BlueStore.h                 | 33 ++++++--------------
 src/test/objectstore/test_bluestore_types.cc |  1 +
 3 files changed, 11 insertions(+), 28 deletions(-)

diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc
index 5d53f227c6f0..27b78173d961 100644
--- a/src/os/bluestore/BlueStore.cc
+++ b/src/os/bluestore/BlueStore.cc
@@ -2187,11 +2187,6 @@ BlueStore::Collection::Collection(BlueStore *ns, Cache *c, coll_t cid)
     cid(cid),
     lock("BlueStore::Collection::lock", true, false),
     exists(true),
-    // size the shared blob hash table as a ratio of the onode cache size.
-    shared_blob_set(MAX(16,
-                        g_conf->bluestore_onode_cache_size /
-                        store->cache_shards.size() *
-                        g_conf->bluestore_shared_blob_hash_table_size_ratio)),
     onode_map(c)
 {
 }
diff --git a/src/os/bluestore/BlueStore.h b/src/os/bluestore/BlueStore.h
index 0bfc7d75a8f8..78c44de93899 100644
--- a/src/os/bluestore/BlueStore.h
+++ b/src/os/bluestore/BlueStore.h
@@ -318,7 +318,7 @@ public:
   struct SharedBlobSet;
 
   /// in-memory shared blob state (incl cached buffers)
-  struct SharedBlob : public boost::intrusive::unordered_set_base_hook<> {
+  struct SharedBlob {
     std::atomic_int nref = {0};  ///< reference count
 
     // these are defined/set if the shared_blob is 'loaded'
@@ -357,37 +357,24 @@ public:
 
   /// a lookup table of SharedBlobs
   struct SharedBlobSet {
-    typedef boost::intrusive::unordered_set<SharedBlob>::bucket_type bucket_type;
-    typedef boost::intrusive::unordered_set<SharedBlob>::bucket_traits bucket_traits;
-
     std::mutex lock;   ///< protect lookup, insertion, removal
 
-    int num_buckets;
-    vector<bucket_type> buckets;
-    boost::intrusive::unordered_set<SharedBlob> uset;
-
-    SharedBlob dummy;  ///< for lookups
-    explicit SharedBlobSet(unsigned n)
-      : num_buckets(n),
-        buckets(n),
-        uset(bucket_traits(buckets.data(), num_buckets)),
-        dummy(0, string(), nullptr) {
-      assert(n > 0);
-    }
+    // we use a bare pointer because we don't want to affect the ref
+    // count
+    std::unordered_map<uint64_t,SharedBlob*> sb_map;
 
     SharedBlobRef lookup(uint64_t sbid) {
       std::lock_guard<std::mutex> l(lock);
-      dummy.sbid = sbid;
-      auto p = uset.find(dummy);
-      if (p == uset.end()) {
+      auto p = sb_map.find(sbid);
+      if (p == sb_map.end()) {
         return nullptr;
       }
-      return &*p;
+      return p->second;
     }
 
     void add(SharedBlob *sb) {
       std::lock_guard<std::mutex> l(lock);
-      uset.insert(*sb);
+      sb_map[sb->sbid] = sb;
       sb->parent_set = this;
     }
 
@@ -395,7 +382,7 @@ public:
       std::lock_guard<std::mutex> l(lock);
       if (sb->nref == 0) {
         assert(sb->parent_set == this);
-        uset.erase(*sb);
+        sb_map.erase(sb->sbid);
         return true;
       }
       return false;
@@ -403,7 +390,7 @@ public:
 
     bool empty() {
       std::lock_guard<std::mutex> l(lock);
-      return uset.empty();
+      return sb_map.empty();
     }
   };
 
diff --git a/src/test/objectstore/test_bluestore_types.cc b/src/test/objectstore/test_bluestore_types.cc
index 2c2911dd56a6..e2c4028cdef4 100644
--- a/src/test/objectstore/test_bluestore_types.cc
+++ b/src/test/objectstore/test_bluestore_types.cc
@@ -34,6 +34,7 @@ TEST(bluestore, sizeof) {
   P(std::atomic_int);
   P(BlueStore::SharedBlobRef);
   P(boost::intrusive::set_base_hook<>);
+  P(boost::intrusive::unordered_set_base_hook<>);
   P(bufferlist);
   cout << "map<uint64_t,uint64_t>\t" << sizeof(map<uint64_t,uint64_t>) << std::endl;
   cout << "map<char,char>\t" << sizeof(map<char,char>) << std::endl;
-- 
2.47.3
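
The following standalone sketch (not BlueStore code) illustrates the two
points the commit message makes: the per-element hook that
boost::intrusive::unordered_set requires (the roughly 8 bytes saved per
SharedBlob once it is dropped) and the externally sized bucket array the
intrusive container needs, versus std::unordered_map, which manages its
buckets automatically. All names below (HookedItem, PlainItem, ItemHash,
ItemEqual) are hypothetical and exist only for the comparison.

// Hypothetical types for illustration only; they mirror the technique,
// not the BlueStore structures.
#include <boost/intrusive/unordered_set.hpp>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <unordered_map>
#include <vector>

namespace bi = boost::intrusive;

// With an intrusive unordered_set, every element carries a hook
// (one pointer, typically 8 bytes on LP64 targets).
struct HookedItem : public bi::unordered_set_base_hook<> {
  uint64_t id = 0;
};

// With std::unordered_map<uint64_t, PlainItem*>, the element needs no hook.
struct PlainItem {
  uint64_t id = 0;
};

struct ItemHash {
  std::size_t operator()(const HookedItem &v) const {
    return std::hash<uint64_t>()(v.id);
  }
};
struct ItemEqual {
  bool operator()(const HookedItem &a, const HookedItem &b) const {
    return a.id == b.id;
  }
};

int main() {
  // The per-element saving the commit message cites comes from dropping
  // the base hook.
  std::cout << "sizeof with hook:    " << sizeof(HookedItem) << "\n";
  std::cout << "sizeof without hook: " << sizeof(PlainItem) << "\n";

  // The intrusive container does not own or resize its buckets; the caller
  // must allocate them and guess a good count up front, which is what the
  // removed bluestore_shared_blob_hash_table_size_ratio tuning was for.
  using ISet = bi::unordered_set<HookedItem, bi::hash<ItemHash>,
                                 bi::equal<ItemEqual>>;
  std::vector<ISet::bucket_type> buckets(16);
  ISet iset(ISet::bucket_traits(buckets.data(), buckets.size()));

  HookedItem a;
  a.id = 42;
  iset.insert(a);
  std::cout << "intrusive lookup hit: " << iset.count(a) << "\n";

  // std::unordered_map sizes and rehashes its buckets automatically, so
  // there is nothing to tune.
  std::unordered_map<uint64_t, PlainItem*> sb_like_map;
  PlainItem b;
  b.id = 42;
  sb_like_map[b.id] = &b;
  std::cout << "map lookup hit:       " << sb_like_map.count(42) << "\n";

  iset.clear();  // unlink before the hooked element is destroyed
  return 0;
}

Keeping a bare PlainItem* (rather than a counted reference) in the map
mirrors the comment the patch adds to SharedBlobSet: the lookup table is
an index, and it should not hold a reference count on the blobs it
indexes.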