From: Kefu Chai <kchai@redhat.com>
Date: Mon, 11 Nov 2019 14:11:48 +0000 (+0800)
Subject: msg/async/rdma: do not init mutex before lockdep is ready
X-Git-Tag: v15.1.0~872^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=56c03cc6a7f7a766eb74295ed57029039c56692a;p=ceph-ci.git

msg/async/rdma: do not init mutex before lockdep is ready

If ceph is built with lockdep enabled, a new id is allocated from the
global free_ids table whenever a mutex is constructed. But that table
is not initialized until lockdep_register_ceph_context() is called,
and that function is not called until the global `CephContext`
instance is constructed. Since the order in which global variables in
different translation units are initialized is undefined, we cannot
let the initialization of one global variable depend on the existence
of another.

So, in this change, the mutex is constructed on first use inside a
static method instead: a function-local static is guaranteed to be
initialized the first time the method is called, by which point
lockdep is ready.

Fixes: https://tracker.ceph.com/issues/42742
Signed-off-by: Kefu Chai <kchai@redhat.com>
---

diff --git a/src/msg/async/rdma/Infiniband.cc b/src/msg/async/rdma/Infiniband.cc
index cdc438c61c2..35444fdcde4 100644
--- a/src/msg/async/rdma/Infiniband.cc
+++ b/src/msg/async/rdma/Infiniband.cc
@@ -843,20 +843,22 @@ void Infiniband::MemoryManager::MemPoolContext::update_stats(int nbufs)
 
 void *Infiniband::MemoryManager::mem_pool::slow_malloc()
 {
-  void *p;
-
-  std::lock_guard l{PoolAllocator::lock};
-  PoolAllocator::g_ctx = ctx;
   // this will trigger pool expansion via PoolAllocator::malloc()
-  p = boost::pool<PoolAllocator>::malloc();
-  PoolAllocator::g_ctx = nullptr;
-  return p;
+  return PoolAllocator::with_context(ctx, [this] {
+    return boost::pool<PoolAllocator>::malloc();
+  });
 }
 
-Infiniband::MemoryManager::MemPoolContext *Infiniband::MemoryManager::PoolAllocator::g_ctx = nullptr;
-ceph::mutex Infiniband::MemoryManager::PoolAllocator::lock =
-  ceph::make_mutex("pool-alloc-lock");
+Infiniband::MemoryManager::MemPoolContext*
+Infiniband::MemoryManager::PoolAllocator::g_ctx = nullptr;
+
+// lock is taken by mem_pool::slow_malloc()
+ceph::mutex& Infiniband::MemoryManager::PoolAllocator::get_lock()
+{
+  static ceph::mutex lock = ceph::make_mutex("pool-alloc-lock");
+  return lock;
+}
 
 char *Infiniband::MemoryManager::PoolAllocator::malloc(const size_type block_size)
 {
   ceph_assert(g_ctx);
@@ -904,7 +906,7 @@ char *Infiniband::MemoryManager::PoolAllocator::malloc(const size_type block_siz
 void Infiniband::MemoryManager::PoolAllocator::free(char * const block)
 {
   mem_info *m;
-  std::lock_guard l{lock};
+  std::lock_guard l{get_lock()};
 
   Chunk *mem_info_chunk = reinterpret_cast<Chunk *>(block);
   m = reinterpret_cast<mem_info *>(reinterpret_cast<char *>(mem_info_chunk) - offsetof(mem_info, chunks));
diff --git a/src/msg/async/rdma/Infiniband.h b/src/msg/async/rdma/Infiniband.h
index 4d5a14eb8d4..acd7f1eb93d 100644
--- a/src/msg/async/rdma/Infiniband.h
+++ b/src/msg/async/rdma/Infiniband.h
@@ -25,11 +25,13 @@
 #include <infiniband/verbs.h>
 #include <rdma/rdma_cma.h>
 
+#include <type_traits>
 #include <atomic>
 #include <string>
 #include <vector>
 #include "include/int_types.h"
 #include "include/page.h"
+#include "include/scope_guard.h"
 #include "common/debug.h"
 #include "common/errno.h"
 #include "common/ceph_mutex.h"
@@ -290,8 +292,17 @@ class Infiniband {
     static char * malloc(const size_type bytes);
     static void free(char * const block);
 
-    static MemPoolContext *g_ctx;
-    static ceph::mutex lock;
+    template <typename Func>
+    static std::invoke_result_t<Func> with_context(MemPoolContext* ctx,
+                                                   Func&& func) {
+      std::lock_guard l{get_lock()};
+      g_ctx = ctx;
+      scope_guard reset_ctx{[] { g_ctx = nullptr; }};
+      return std::move(func)();
+    }
+  private:
+    static ceph::mutex& get_lock();
+    static MemPoolContext* g_ctx;
 };
 
 /**
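
For reference, a standalone sketch of the construct-on-first-use idiom
this patch switches to (names here are illustrative, not code from the
Ceph tree). A function-local static is initialized the first time
control reaches its declaration, and since C++11 that initialization
is also thread-safe, so its constructor can safely depend on state
that is only set up at run time. A namespace-scope global in another
translation unit has no such ordering guarantee:

    #include <cassert>
    #include <mutex>

    // stands in for lockdep's free_ids table, which is only usable
    // once its constructor has run
    struct IdTable {
      bool ready = false;   // zero-initialized before any constructor runs
      IdTable() { ready = true; }
    };
    IdTable id_table;

    // stands in for ceph::mutex with lockdep enabled: its constructor
    // consults the id table
    struct TrackedMutex {
      TrackedMutex() { assert(id_table.ready); }
      std::mutex m;
    };

    // unsafe: if this global lived in a different translation unit
    // than id_table, it could be constructed first and the assert
    // would fire
    // TrackedMutex global_lock;

    // safe: constructed on the first call, during normal execution
    // rather than during static initialization
    TrackedMutex& get_lock() {
      static TrackedMutex lock;
      return lock;
    }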
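
Similarly, a simplified, self-contained sketch of the with_context()
helper added in Infiniband.h (the real code uses ceph::mutex and the
scope_guard from include/scope_guard.h; the minimal guard below only
mimics it). The point of the guard is that g_ctx is reset on every
exit path, including an exception thrown by func(); and because the
guard is declared after the lock_guard, it is destroyed first, so the
reset still happens while the lock is held:

    #include <mutex>
    #include <type_traits>
    #include <utility>

    // minimal stand-in for include/scope_guard.h
    template <typename F>
    struct scope_guard {
      F f;
      ~scope_guard() { std::move(f)(); }
    };
    template <typename F> scope_guard(F) -> scope_guard<F>;

    struct Context {};
    inline Context* g_ctx = nullptr;

    inline std::mutex& get_lock() {
      static std::mutex lock;   // construct-on-first-use, as above
      return lock;
    }

    template <typename Func>
    std::invoke_result_t<Func> with_context(Context* ctx, Func&& func) {
      std::lock_guard l{get_lock()};
      g_ctx = ctx;                                 // visible to func()
      scope_guard reset{[] { g_ctx = nullptr; }};  // runs on any exit
      return std::move(func)();
    }

    // usage:
    //   Context c;
    //   int n = with_context(&c, [] { return 42; });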