From: Kefu Chai Date: Tue, 5 Jan 2021 09:45:05 +0000 (+0800) Subject: common/mutex_debug: add memory barrier before load/store nlock X-Git-Tag: v16.1.0~62^2 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=966c78e18e9c5941bc1bbcdd71034509598fdd4e;p=ceph.git common/mutex_debug: add memory barrier before load/store nlock On some architectures, the CPU reorders store-store and load-load instruction sequences, hence we cannot assume that nlock is always updated after locked_by in _post_lock(). In this change, nlock is changed from a plain int to std::atomic<int> so we can use atomic primitives to store to / read from it. The major consumer of nlock and locked_by is `is_locked_by_me()`. So, access to locked_by is always guarded by the release-acquire ordering of nlock, so that accesses to the two variables are not reordered relative to each other. `is_locked_by_me()` is only enabled when the `CEPH_DEBUG_MUTEX` macro is defined, for debugging purposes, and this macro is enabled only for Debug builds; the vanilla std::mutex is used for Release builds. So the problem addressed by this change only impacts Debug builds on ARM64/ARM32 architectures. 
Signed-off-by: Kefu Chai --- diff --git a/src/common/mutex_debug.h b/src/common/mutex_debug.h index 4a0c2cbb447..c1a4ff2a435 100644 --- a/src/common/mutex_debug.h +++ b/src/common/mutex_debug.h @@ -15,6 +15,7 @@ #ifndef CEPH_COMMON_MUTEX_DEBUG_H #define CEPH_COMMON_MUTEX_DEBUG_H +#include <atomic> #include <system_error> #include <thread> @@ -38,7 +39,7 @@ protected: bool lockdep; // track this mutex using lockdep_* bool backtrace; // gather backtrace on lock acquisition - int nlock = 0; + std::atomic<int> nlock = 0; std::thread::id locked_by = {}; bool _enable_lockdep() const { @@ -57,10 +58,10 @@ public: return (nlock > 0); } bool is_locked_by_me() const { - return nlock > 0 && locked_by == std::this_thread::get_id(); + return nlock.load(std::memory_order_acquire) > 0 && locked_by == std::this_thread::get_id(); } operator bool() const { - return nlock > 0 && locked_by == std::this_thread::get_id(); + return is_locked_by_me(); } }; @@ -152,17 +153,19 @@ public: if (!recursive) ceph_assert(nlock == 0); locked_by = std::this_thread::get_id(); - nlock++; + nlock.fetch_add(1, std::memory_order_release); } void _pre_unlock() { - ceph_assert(nlock > 0); - --nlock; + if (recursive) { + ceph_assert(nlock > 0); + } else { + ceph_assert(nlock == 1); + } ceph_assert(locked_by == std::this_thread::get_id()); - if (!recursive) - ceph_assert(nlock == 0); - if (nlock == 0) + if (nlock == 1) locked_by = std::thread::id(); + nlock.fetch_sub(1, std::memory_order_release); } bool try_lock(bool no_lockdep = false) {