From 3a258cb1ce5f64305b0115a0b14c5cc63cef5e80 Mon Sep 17 00:00:00 2001
From: Kefu Chai
Date: Fri, 18 Jan 2019 19:43:01 +0800
Subject: [PATCH] os/memstore: use ceph::mutex and friends

It is another step to ditch RWLock, and paves the road to a lockless
memstore which can be used by crimson-osd directly.

Signed-off-by: Kefu Chai
---
 src/os/memstore/MemStore.cc | 108 +++++++++++++++++-------------------
 src/os/memstore/MemStore.h  |  27 +++++----
 2 files changed, 66 insertions(+), 69 deletions(-)

diff --git a/src/os/memstore/MemStore.cc b/src/os/memstore/MemStore.cc
index ea87c80cc84..c8a46b47c02 100644
--- a/src/os/memstore/MemStore.cc
+++ b/src/os/memstore/MemStore.cc
@@ -244,7 +244,7 @@ objectstore_perf_stat_t MemStore::get_cur_stats()
 
 MemStore::CollectionRef MemStore::get_collection(const coll_t& cid)
 {
-  RWLock::RLocker l(coll_lock);
+  std::shared_lock l{coll_lock};
   ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
   if (cp == coll_map.end())
     return CollectionRef();
@@ -253,7 +253,7 @@ MemStore::CollectionRef MemStore::get_collection(const coll_t& cid)
 
 ObjectStore::CollectionHandle MemStore::create_new_collection(const coll_t& cid)
 {
-  RWLock::WLocker l(coll_lock);
+  std::lock_guard l{coll_lock};
   Collection *c = new Collection(cct, cid);
   new_coll_map[cid] = c;
   return c;
@@ -372,7 +372,7 @@ int MemStore::getattr(CollectionHandle &c_, const ghobject_t& oid,
   if (!o)
     return -ENOENT;
   string k(name);
-  std::lock_guard<std::mutex> lock(o->xattr_mutex);
+  std::lock_guard lock{o->xattr_mutex};
   if (!o->xattr.count(k)) {
     return -ENODATA;
   }
@@ -391,7 +391,7 @@ int MemStore::getattrs(CollectionHandle &c_, const ghobject_t& oid,
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->xattr_mutex);
+  std::lock_guard lock{o->xattr_mutex};
   aset = o->xattr;
   return 0;
 }
@@ -399,7 +399,7 @@ int MemStore::getattrs(CollectionHandle &c_, const ghobject_t& oid,
 int MemStore::list_collections(vector<coll_t>& ls)
 {
   dout(10) << __func__ << dendl;
-  RWLock::RLocker l(coll_lock);
+  std::shared_lock l{coll_lock};
   for (ceph::unordered_map<coll_t,CollectionRef>::iterator p = coll_map.begin();
        p != coll_map.end();
        ++p) {
@@ -411,7 +411,7 @@ int MemStore::list_collections(vector<coll_t>& ls)
 bool MemStore::collection_exists(const coll_t& cid)
 {
   dout(10) << __func__ << " " << cid << dendl;
-  RWLock::RLocker l(coll_lock);
+  std::shared_lock l{coll_lock};
   return coll_map.count(cid);
 }
 
@@ -419,7 +419,7 @@ int MemStore::collection_empty(CollectionHandle& ch, bool *empty)
 {
   dout(10) << __func__ << " " << ch->cid << dendl;
   CollectionRef c = static_cast<Collection*>(ch.get());
-  RWLock::RLocker l(c->lock);
+  std::shared_lock l{c->lock};
   *empty = c->object_map.empty();
   return 0;
 }
@@ -428,7 +428,7 @@ int MemStore::collection_bits(CollectionHandle& ch)
 {
   dout(10) << __func__ << " " << ch->cid << dendl;
   Collection *c = static_cast<Collection*>(ch.get());
-  RWLock::RLocker l(c->lock);
+  std::shared_lock l{c->lock};
   return c->bits;
 }
 
@@ -439,7 +439,7 @@ int MemStore::collection_list(CollectionHandle& ch,
                               vector<ghobject_t> *ls, ghobject_t *next)
 {
   Collection *c = static_cast<Collection*>(ch.get());
-  RWLock::RLocker l(c->lock);
+  std::shared_lock l{c->lock};
   dout(10) << __func__ << " cid " << ch->cid << " start " << start
            << " end " << end << dendl;
@@ -473,7 +473,7 @@ int MemStore::omap_get(
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->omap_mutex);
+  std::lock_guard lock{o->omap_mutex};
   *header = o->omap_header;
   *out = o->omap;
   return 0;
@@ -491,7 +491,7 @@ int MemStore::omap_get_header(
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->omap_mutex);
+  std::lock_guard lock{o->omap_mutex};
   *header = o->omap_header;
   return 0;
 }
@@ -507,7 +507,7 @@ int MemStore::omap_get_keys(
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->omap_mutex);
+  std::lock_guard lock{o->omap_mutex};
   for (map<string,bufferlist>::iterator p = o->omap.begin();
        p != o->omap.end();
        ++p)
@@ -527,7 +527,7 @@ int MemStore::omap_get_values(
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->omap_mutex);
+  std::lock_guard lock{o->omap_mutex};
   for (set<string>::const_iterator p = keys.begin();
        p != keys.end();
        ++p) {
@@ -550,7 +550,7 @@ int MemStore::omap_check_keys(
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->omap_mutex);
+  std::lock_guard lock{o->omap_mutex};
   for (set<string>::const_iterator p = keys.begin();
        p != keys.end();
        ++p) {
@@ -570,35 +570,35 @@ public:
     : c(c), o(o), it(o->omap.begin()) {}
 
   int seek_to_first() override {
-    std::lock_guard<std::mutex> lock(o->omap_mutex);
+    std::lock_guard lock{o->omap_mutex};
     it = o->omap.begin();
     return 0;
   }
   int upper_bound(const string &after) override {
-    std::lock_guard<std::mutex> lock(o->omap_mutex);
+    std::lock_guard lock{o->omap_mutex};
     it = o->omap.upper_bound(after);
     return 0;
   }
   int lower_bound(const string &to) override {
-    std::lock_guard<std::mutex> lock(o->omap_mutex);
+    std::lock_guard lock{o->omap_mutex};
     it = o->omap.lower_bound(to);
     return 0;
   }
   bool valid() override {
-    std::lock_guard<std::mutex> lock(o->omap_mutex);
+    std::lock_guard lock{o->omap_mutex};
     return it != o->omap.end();
   }
   int next() override {
-    std::lock_guard<std::mutex> lock(o->omap_mutex);
+    std::lock_guard lock{o->omap_mutex};
     ++it;
     return 0;
   }
   string key() override {
-    std::lock_guard<std::mutex> lock(o->omap_mutex);
+    std::lock_guard lock{o->omap_mutex};
     return it->first;
   }
   bufferlist value() override {
-    std::lock_guard<std::mutex> lock(o->omap_mutex);
+    std::lock_guard lock{o->omap_mutex};
     return it->second;
   }
   int status() override {
@@ -632,8 +632,7 @@ int MemStore::queue_transactions(
   // Sequencer with a mutex. this guarantees ordering on a given sequencer,
   // while allowing operations on different sequencers to happen in parallel
   Collection *c = static_cast<Collection*>(ch.get());
-  std::unique_lock<std::mutex> lock;
-  lock = std::unique_lock<std::mutex>(c->sequencer_mutex);
+  std::unique_lock lock{c->sequencer_mutex};
 
   for (vector<Transaction>::iterator p = tls.begin(); p != tls.end(); ++p) {
     // poke the TPHandle heartbeat just to exercise that code path
@@ -1084,7 +1083,7 @@ int MemStore::_remove(const coll_t& cid, const ghobject_t& oid)
   CollectionRef c = get_collection(cid);
   if (!c)
     return -ENOENT;
-  RWLock::WLocker l(c->lock);
+  std::lock_guard l{c->lock};
 
   auto i = c->object_hash.find(oid);
   if (i == c->object_hash.end())
@@ -1107,7 +1106,7 @@ int MemStore::_setattrs(const coll_t& cid, const ghobject_t& oid,
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->xattr_mutex);
+  std::lock_guard lock{o->xattr_mutex};
   for (map<string,bufferptr>::const_iterator p = aset.begin(); p != aset.end(); ++p)
     o->xattr[p->first] = p->second;
   return 0;
@@ -1123,7 +1122,7 @@ int MemStore::_rmattr(const coll_t& cid, const ghobject_t& oid, const char *name
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->xattr_mutex);
+  std::lock_guard lock{o->xattr_mutex};
   auto i = o->xattr.find(name);
   if (i == o->xattr.end())
     return -ENODATA;
@@ -1141,7 +1140,7 @@ int MemStore::_rmattrs(const coll_t& cid, const ghobject_t& oid)
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->xattr_mutex);
+  std::lock_guard lock{o->xattr_mutex};
   o->xattr.clear();
   return 0;
 }
@@ -1163,12 +1162,10 @@ int MemStore::_clone(const coll_t& cid, const ghobject_t& oldoid,
   no->clone(oo.get(), 0, oo->get_size(), 0);
 
   // take xattr and omap locks with std::lock()
-  std::unique_lock<std::mutex>
-    ox_lock(oo->xattr_mutex, std::defer_lock),
-    nx_lock(no->xattr_mutex, std::defer_lock),
-    oo_lock(oo->omap_mutex, std::defer_lock),
-    no_lock(no->omap_mutex, std::defer_lock);
-  std::lock(ox_lock, nx_lock, oo_lock, no_lock);
+  std::scoped_lock l{oo->xattr_mutex,
+                     no->xattr_mutex,
+                     oo->omap_mutex,
+                     no->omap_mutex};
 
   no->omap_header = oo->omap_header;
   no->omap = oo->omap;
@@ -1214,7 +1211,7 @@ int MemStore::_omap_clear(const coll_t& cid, const ghobject_t &oid)
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->omap_mutex);
+  std::lock_guard lock{o->omap_mutex};
   o->omap.clear();
   o->omap_header.clear();
   return 0;
@@ -1231,7 +1228,7 @@ int MemStore::_omap_setkeys(const coll_t& cid, const ghobject_t &oid,
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->omap_mutex);
+  std::lock_guard lock{o->omap_mutex};
   auto p = aset_bl.cbegin();
   __u32 num;
   decode(num, p);
@@ -1254,7 +1251,7 @@ int MemStore::_omap_rmkeys(const coll_t& cid, const ghobject_t &oid,
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->omap_mutex);
+  std::lock_guard lock{o->omap_mutex};
   auto p = keys_bl.cbegin();
   __u32 num;
   decode(num, p);
@@ -1278,7 +1275,7 @@ int MemStore::_omap_rmkeyrange(const coll_t& cid, const ghobject_t &oid,
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->omap_mutex);
+  std::lock_guard lock{o->omap_mutex};
   map<string,bufferlist>::iterator p = o->omap.lower_bound(first);
   map<string,bufferlist>::iterator e = o->omap.lower_bound(last);
   o->omap.erase(p, e);
@@ -1296,7 +1293,7 @@ int MemStore::_omap_setheader(const coll_t& cid, const ghobject_t &oid,
   ObjectRef o = c->get_object(oid);
   if (!o)
     return -ENOENT;
-  std::lock_guard<std::mutex> lock(o->omap_mutex);
+  std::lock_guard lock{o->omap_mutex};
   o->omap_header = bl;
   return 0;
 }
@@ -1304,7 +1301,7 @@ int MemStore::_omap_setheader(const coll_t& cid, const ghobject_t &oid,
 int MemStore::_create_collection(const coll_t& cid, int bits)
 {
   dout(10) << __func__ << " " << cid << dendl;
-  RWLock::WLocker l(coll_lock);
+  std::lock_guard l{coll_lock};
   auto result = coll_map.insert(std::make_pair(cid, CollectionRef()));
   if (!result.second)
     return -EEXIST;
@@ -1319,12 +1316,12 @@ int MemStore::_create_collection(const coll_t& cid, int bits)
 int MemStore::_destroy_collection(const coll_t& cid)
 {
   dout(10) << __func__ << " " << cid << dendl;
-  RWLock::WLocker l(coll_lock);
+  std::lock_guard l{coll_lock};
   ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
   if (cp == coll_map.end())
     return -ENOENT;
   {
-    RWLock::RLocker l2(cp->second->lock);
+    std::shared_lock l2{cp->second->lock};
     if (!cp->second->object_map.empty())
       return -ENOTEMPTY;
     cp->second->exists = false;
@@ -1343,8 +1340,9 @@ int MemStore::_collection_add(const coll_t& cid, const coll_t& ocid, const ghobj
   CollectionRef oc = get_collection(ocid);
   if (!oc)
     return -ENOENT;
-  RWLock::WLocker l1(std::min(&(*c), &(*oc))->lock);
-  RWLock::WLocker l2(std::max(&(*c), &(*oc))->lock);
+
+  std::scoped_lock l{std::min(&(*c), &(*oc))->lock,
+                     std::max(&(*c), &(*oc))->lock};
 
   if (c->object_hash.count(oid))
     return -EEXIST;
@@ -1370,14 +1368,12 @@ int MemStore::_collection_move_rename(const coll_t& oldcid, const ghobject_t& ol
   // note: c and oc may be the same
   ceph_assert(&(*c) == &(*oc));
-  c->lock.get_write();
 
-  int r = -EEXIST;
+  std::lock_guard l{c->lock};
   if (c->object_hash.count(oid))
-    goto out;
-  r = -ENOENT;
+    return -EEXIST;
   if (oc->object_hash.count(oldoid) == 0)
-    goto out;
+    return -ENOENT;
 
   {
     ObjectRef o = oc->object_hash[oldoid];
     c->object_map[oid] = o;
@@ -1385,10 +1381,7 @@ int MemStore::_collection_move_rename(const coll_t& oldcid, const ghobject_t& ol
     oc->object_map.erase(oldoid);
     oc->object_hash.erase(oldoid);
   }
-  r = 0;
- out:
-  c->lock.put_write();
-  return r;
+  return 0;
 }
 
 int MemStore::_split_collection(const coll_t& cid, uint32_t bits, uint32_t match,
@@ -1402,8 +1395,9 @@
   CollectionRef dc = get_collection(dest);
   if (!dc)
     return -ENOENT;
-  RWLock::WLocker l1(std::min(&(*sc), &(*dc))->lock);
-  RWLock::WLocker l2(std::max(&(*sc), &(*dc))->lock);
+
+  std::scoped_lock l{std::min(&(*sc), &(*dc))->lock,
+                     std::max(&(*sc), &(*dc))->lock};
 
   map<ghobject_t,ObjectRef>::iterator p = sc->object_map.begin();
   while (p != sc->object_map.end()) {
@@ -1435,8 +1429,8 @@ int MemStore::_merge_collection(const coll_t& cid, uint32_t bits, coll_t dest)
   if (!dc)
     return -ENOENT;
   {
-    RWLock::WLocker l1(std::min(&(*sc), &(*dc))->lock);
-    RWLock::WLocker l2(std::max(&(*sc), &(*dc))->lock);
+    std::scoped_lock l{std::min(&(*sc), &(*dc))->lock,
+                       std::max(&(*sc), &(*dc))->lock};
 
     map<ghobject_t,ObjectRef>::iterator p = sc->object_map.begin();
     while (p != sc->object_map.end()) {
@@ -1451,7 +1445,7 @@ int MemStore::_merge_collection(const coll_t& cid, uint32_t bits, coll_t dest)
   }
 
   {
-    RWLock::WLocker l(coll_lock);
+    std::lock_guard l{coll_lock};
     ceph::unordered_map<coll_t,CollectionRef>::iterator cp = coll_map.find(cid);
     ceph_assert(cp != coll_map.end());
     used_bytes -= cp->second->used_bytes();
diff --git a/src/os/memstore/MemStore.h b/src/os/memstore/MemStore.h
index 77b2a32c6e7..367d6245e72 100644
--- a/src/os/memstore/MemStore.h
+++ b/src/os/memstore/MemStore.h
@@ -30,8 +30,8 @@
 class MemStore : public ObjectStore {
 public:
   struct Object : public RefCountedObject {
-    std::mutex xattr_mutex;
-    std::mutex omap_mutex;
+    ceph::mutex xattr_mutex{ceph::make_mutex("MemStore::Object::xattr_mutex")};
+    ceph::mutex omap_mutex{ceph::make_mutex("MemStore::Object::omap_mutex")};
     map<string,bufferptr> xattr;
     bufferlist omap_header;
     map<string,bufferlist> omap;
@@ -101,9 +101,13 @@ public:
     ceph::unordered_map<ghobject_t, ObjectRef> object_hash;  ///< for lookup
     map<ghobject_t, ObjectRef> object_map;        ///< for iteration
     map<string,bufferptr> xattr;
-    RWLock lock;   ///< for object_{map,hash}
-    bool exists;
-    std::mutex sequencer_mutex;
+    /// for object_{map,hash}
+    ceph::shared_mutex lock{
+      ceph::make_shared_mutex("MemStore::Collection::lock", true, false)};
+
+    bool exists = true;
+    ceph::mutex sequencer_mutex{
+      ceph::make_mutex("MemStore::Collection::sequencer_mutex")};
 
     typedef boost::intrusive_ptr<Collection> Ref;
     friend void intrusive_ptr_add_ref(Collection *c) { c->get(); }
@@ -117,7 +121,7 @@ public:
     // level.
 
     ObjectRef get_object(ghobject_t oid) {
-      RWLock::RLocker l(lock);
+      std::shared_lock l{lock};
       auto o = object_hash.find(oid);
       if (o == object_hash.end())
         return ObjectRef();
@@ -125,7 +129,7 @@ public:
     }
 
     ObjectRef get_or_create_object(ghobject_t oid) {
-      RWLock::WLocker l(lock);
+      std::lock_guard l{lock};
       auto result = object_hash.emplace(oid, ObjectRef());
       if (result.second)
         object_map[oid] = result.first->second = create_object();
@@ -183,9 +187,7 @@ public:
     explicit Collection(CephContext *cct, coll_t c)
       : CollectionImpl(c),
         cct(cct),
-        use_page_set(cct->_conf->memstore_page_set),
-        lock("MemStore::Collection::lock", true, false),
-        exists(true) {}
+        use_page_set(cct->_conf->memstore_page_set) {}
   };
   typedef Collection::Ref CollectionRef;
 
@@ -194,7 +196,9 @@
 private:
 
   ceph::unordered_map<coll_t, CollectionRef> coll_map;
-  RWLock coll_lock;    ///< rwlock to protect coll_map
+  /// rwlock to protect coll_map
+  ceph::shared_mutex coll_lock{
+    ceph::make_shared_mutex("MemStore::coll_lock")};
   map<coll_t,CollectionRef> new_coll_map;
 
   CollectionRef get_collection(const coll_t& cid);
@@ -244,7 +248,6 @@ public:
   MemStore(CephContext *cct, const string& path)
     : ObjectStore(cct, path),
-      coll_lock("MemStore::coll_lock"),
      finisher(cct),
      used_bytes(0) {}
   ~MemStore() override { }
 
-- 
2.39.5
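
A note on the locking primitives this patch switches to, since the change is
mostly mechanical: std::scoped_lock (C++17) acquires all of the mutexes it is
given with the same deadlock-avoidance algorithm as std::lock(), which is why
the four defer_lock'd unique_locks in _clone() collapse into a single
declaration, and why the std::min/std::max address ordering retained in
_collection_add(), _split_collection() and _merge_collection() is no longer
required for correctness (it is harmless). The standalone sketch below
illustrates the idea with simplified stand-ins; the demo namespace, the
Collection struct and the merge() function are hypothetical and only
approximate what a release build of ceph::mutex/ceph::make_mutex boils down
to (debug builds add lockdep instrumentation keyed by the string names).

// Standalone sketch, not part of the patch: simplified stand-ins for the
// ceph::mutex aliases and the std::scoped_lock pattern used above.
#include <mutex>
#include <shared_mutex>

namespace demo {  // hypothetical namespace, approximating a release build
using mutex = std::mutex;
using shared_mutex = std::shared_mutex;

// The name argument only matters for debug/lockdep builds; here it is dropped
// and C++17 guaranteed copy elision lets a non-movable mutex be returned by value.
template <typename... Args>
mutex make_mutex(Args&&...) { return {}; }

template <typename... Args>
shared_mutex make_shared_mutex(Args&&...) { return {}; }
}  // namespace demo

struct Collection {
  // mirrors MemStore::Collection::lock after the patch
  demo::shared_mutex lock{demo::make_shared_mutex("demo::Collection::lock")};
};

// mirrors the _merge_collection()/_split_collection() pattern: both
// collections are write-locked before objects are moved between them.
void merge(Collection& src, Collection& dst) {
  // std::scoped_lock locks every argument with std::lock()-style deadlock
  // avoidance, so no manual lock ordering is needed here.
  std::scoped_lock l{src.lock, dst.lock};
  // ... move object_map/object_hash entries under both exclusive locks ...
}

int main() {
  Collection a, b;
  merge(a, b);
  return 0;
}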