]> git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
msg/async/rdma: do not init mutex before lockdep is ready
authorKefu Chai <kchai@redhat.com>
Mon, 11 Nov 2019 14:11:48 +0000 (22:11 +0800)
committerKefu Chai <kchai@redhat.com>
Tue, 12 Nov 2019 03:26:39 +0000 (11:26 +0800)
If we build Ceph with lockdep enabled, a new id is allocated from the
global free_ids table whenever a mutex is constructed. But this table
is not initialized until lockdep_register_ceph_context() is called,
and that function is not called until the global `CephContext`
instance is constructed. Since the order of initialization of global
variables across translation units is not defined, we cannot let the
initialization of one global variable depend on the existence of one
or more other global variables.

So, in this change, the mutex is instead obtained through a static method that holds it as a function-local static, which is guaranteed to be initialized on first use — after lockdep is ready.

Fixes: https://tracker.ceph.com/issues/42742
Signed-off-by: Kefu Chai <kchai@redhat.com>
src/msg/async/rdma/Infiniband.cc
src/msg/async/rdma/Infiniband.h

index cdc438c61c296941a35d60626ff8c5ae42869c22..35444fdcde41ca95b07e0462fdd2b3cf8dcddbc4 100644 (file)
@@ -843,20 +843,22 @@ void Infiniband::MemoryManager::MemPoolContext::update_stats(int nbufs)
 
 void *Infiniband::MemoryManager::mem_pool::slow_malloc()
 {
-  void *p;
-
-  std::lock_guard l{PoolAllocator::lock};
-  PoolAllocator::g_ctx = ctx;
   // this will trigger pool expansion via PoolAllocator::malloc()
-  p = boost::pool<PoolAllocator>::malloc();
-  PoolAllocator::g_ctx = nullptr;
-  return p;
+  return PoolAllocator::with_context(ctx, [this] {
+    return boost::pool<PoolAllocator>::malloc();
+  });
 }
-Infiniband::MemoryManager::MemPoolContext *Infiniband::MemoryManager::PoolAllocator::g_ctx = nullptr;
-ceph::mutex Infiniband::MemoryManager::PoolAllocator::lock =
-                           ceph::make_mutex("pool-alloc-lock");
+
+Infiniband::MemoryManager::MemPoolContext*
+Infiniband::MemoryManager::PoolAllocator::g_ctx = nullptr;
 
 // lock is taken by mem_pool::slow_malloc()
+ceph::mutex& Infiniband::MemoryManager::PoolAllocator::get_lock()
+{
+  static ceph::mutex lock = ceph::make_mutex("pool-alloc-lock");
+  return lock;
+}
+
 char *Infiniband::MemoryManager::PoolAllocator::malloc(const size_type block_size)
 {
   ceph_assert(g_ctx);
@@ -904,7 +906,7 @@ char *Infiniband::MemoryManager::PoolAllocator::malloc(const size_type block_siz
 void Infiniband::MemoryManager::PoolAllocator::free(char * const block)
 {
   mem_info *m;
-  std::lock_guard l{lock};
+  std::lock_guard l{get_lock()};
     
   Chunk *mem_info_chunk = reinterpret_cast<Chunk *>(block);
   m = reinterpret_cast<mem_info *>(reinterpret_cast<char *>(mem_info_chunk) - offsetof(mem_info, chunks));
index 4d5a14eb8d4cac24ec7da87dd1a125369317ff96..acd7f1eb93d6c6eac41db6e625ec168365315607 100644 (file)
 #include <rdma/rdma_cma.h>
 
 #include <atomic>
+#include <functional>
 #include <string>
 #include <vector>
 
 #include "include/int_types.h"
 #include "include/page.h"
+#include "include/scope_guard.h"
 #include "common/debug.h"
 #include "common/errno.h"
 #include "common/ceph_mutex.h"
@@ -290,8 +292,17 @@ class Infiniband {
       static char * malloc(const size_type bytes);
       static void free(char * const block);
 
-      static MemPoolContext  *g_ctx;
-      static ceph::mutex lock;
+      template<typename Func>
+      static std::invoke_result_t<Func> with_context(MemPoolContext* ctx,
+                                                    Func&& func) {
+       std::lock_guard l{get_lock()};
+       g_ctx = ctx;
+       scope_guard reset_ctx{[] { g_ctx = nullptr; }};
+       return std::move(func)();
+      }
+    private:
+      static ceph::mutex& get_lock();
+      static MemPoolContext* g_ctx;
     };
 
     /**