git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
common: Switch singletons to use immobile_any 20273/head
author: Adam C. Emerson <aemerson@redhat.com>
Wed, 7 Feb 2018 18:40:08 +0000 (13:40 -0500)
committer: Adam C. Emerson <aemerson@redhat.com>
Wed, 7 Feb 2018 18:40:08 +0000 (13:40 -0500)
This cleans up the interface, makes things more robust by keying singletons
on both name and type, and performs fewer allocations.

Signed-off-by: Adam C. Emerson <aemerson@redhat.com>
src/common/TracepointProvider.h
src/common/ceph_context.cc
src/common/ceph_context.h
src/librbd/ImageCtx.cc
src/librbd/ImageState.cc
src/librbd/Journal.cc
src/librbd/TaskFinisher.h
src/msg/async/AsyncMessenger.cc
src/msg/async/Event.cc
src/msg/async/Event.h
src/tools/rbd_mirror/Mirror.cc

index 97d3a0f763723fa0e906fd8ed1e4025b8613ea25..82a0359484ee406d902dadf0e899e168b2eba95f 100644 (file)
@@ -10,7 +10,7 @@
 
 struct md_config_t;
 
-class TracepointProvider : public md_config_obs_t, boost::noncopyable {
+class TracepointProvider : public md_config_obs_t {
 public:
   struct Traits {
     const char *library;
@@ -49,11 +49,16 @@ public:
                      const char *config_key);
   ~TracepointProvider() override;
 
+  TracepointProvider(const TracepointProvider&) = delete;
+  TracepointProvider operator =(const TracepointProvider&) = delete;
+  TracepointProvider(TracepointProvider&&) = delete;
+  TracepointProvider operator =(TracepointProvider&&) = delete;
+
   template <const Traits &traits>
   static void initialize(CephContext *cct) {
 #ifdef WITH_LTTNG
-    TypedSingleton<traits> *singleton;
-    cct->lookup_or_create_singleton_object(singleton, traits.library);
+     cct->lookup_or_create_singleton_object<TypedSingleton<traits>>(
+       traits.library, cct);
 #endif
   }
 
index 401dbf53fc055c7f209b9d249320131fc317d351..7005777b2b6a57278d7b2d7b18f7dba903525bd5 100644 (file)
@@ -639,18 +639,14 @@ CephContext::CephContext(uint32_t module_type_,
   _crypto_aes = CryptoHandler::create(CEPH_CRYPTO_AES);
   _crypto_random.reset(new CryptoRandom());
 
-  MempoolObs *mempool_obs = 0;
-  lookup_or_create_singleton_object(mempool_obs, "mempool_obs");
+  lookup_or_create_singleton_object<MempoolObs>("mempool_obs", this);
 }
 
 CephContext::~CephContext()
 {
+  associated_objs.clear();
   join_service_thread();
 
-  for (map<string, SingletonWrapper*>::iterator it = _associated_objs.begin();
-       it != _associated_objs.end(); ++it)
-    delete it->second;
-
   if (_cct_perf) {
     _perf_counters_collection->remove(_cct_perf);
     delete _cct_perf;
index 9070cfb72e82e2c3d8a0584b7c9a0723418d62ec..1bdef28866cff813097bb705f06e18e9872b94ee 100644 (file)
 #ifndef CEPH_CEPHCONTEXT_H
 #define CEPH_CEPHCONTEXT_H
 
-#include <set>
+#include <atomic>
+#include <map>
 #include <memory>
 #include <mutex>
-#include <atomic>
+#include <set>
+#include <string>
+#include <string_view>
+#include <typeinfo>
+#include <typeindex>
 
-#include <boost/noncopyable.hpp>
+#include "include/any.h"
 
 #include "common/cmdparse.h"
 #include "common/code_environment.h"
@@ -61,6 +66,11 @@ public:
               enum code_environment_t code_env=CODE_ENVIRONMENT_UTILITY,
               int init_flags_ = 0);
 
+  CephContext(const CephContext&) = delete;
+  CephContext& operator =(const CephContext&) = delete;
+  CephContext(CephContext&&) = delete;
+  CephContext& operator =(CephContext&&) = delete;
+
   // ref count!
 private:
   ~CephContext();
@@ -129,20 +139,28 @@ public:
   void do_command(std::string command, cmdmap_t& cmdmap, std::string format,
                  ceph::bufferlist *out);
 
-  template<typename T>
-  void lookup_or_create_singleton_object(T*& p, const std::string &name) {
-    std::lock_guard<ceph::spinlock> lg(_associated_objs_lock);
-
-    if (!_associated_objs.count(name)) {
-      p = new T(this);
-      _associated_objs[name] = new TypedSingletonWrapper<T>(p);
-    } else {
-      TypedSingletonWrapper<T> *wrapper =
-        dynamic_cast<TypedSingletonWrapper<T> *>(_associated_objs[name]);
-      assert(wrapper != NULL);
-      p = wrapper->singleton;
+  static constexpr std::size_t largest_singleton = sizeof(void*) * 72;
+
+  template<typename T, typename... Args>
+  T& lookup_or_create_singleton_object(std::string_view name,
+                                      Args&&... args) {
+    static_assert(sizeof(T) <= largest_singleton,
+                 "Please increase largest singleton.");
+    std::lock_guard lg(associated_objs_lock);
+    std::type_index type = typeid(T);
+
+    auto i = associated_objs.find(std::make_pair(name, type));
+    if (i == associated_objs.cend()) {
+      i = associated_objs.emplace_hint(
+       i,
+       std::piecewise_construct,
+       std::forward_as_tuple(name, type),
+       std::forward_as_tuple(std::in_place_type<T>,
+                             std::forward<Args>(args)...));
     }
+    return ceph::any_cast<T&>(i->second);
   }
+
   /**
    * get a crypto handler
    */
@@ -206,23 +224,7 @@ public:
   }
 
 private:
-  struct SingletonWrapper : boost::noncopyable {
-    virtual ~SingletonWrapper() {}
-  };
-
-  template <typename T>
-  struct TypedSingletonWrapper : public SingletonWrapper {
-    TypedSingletonWrapper(T *p) : singleton(p) {
-    }
-    ~TypedSingletonWrapper() override {
-      delete singleton;
-    }
-
-    T *singleton;
-  };
 
-  CephContext(const CephContext &rhs);
-  CephContext &operator=(const CephContext &rhs);
 
   /* Stop and join the Ceph Context's service thread */
   void join_service_thread();
@@ -260,8 +262,21 @@ private:
 
   ceph::HeartbeatMap *_heartbeat_map;
 
-  ceph::spinlock _associated_objs_lock;
-  std::map<std::string, SingletonWrapper*> _associated_objs;
+  ceph::spinlock associated_objs_lock;
+
+  struct associated_objs_cmp {
+    using is_transparent = std::true_type;
+    template<typename T, typename U>
+    bool operator ()(const std::pair<T, std::type_index>& l,
+                    const std::pair<U, std::type_index>& r) const noexcept {
+      return ((l.first < r.first)  ||
+             (l.first == r.first && l.second < r.second));
+    }
+  };
+
+  std::map<std::pair<std::string, std::type_index>,
+          ceph::immobile_any<largest_singleton>,
+          associated_objs_cmp> associated_objs;
 
   ceph::spinlock _fork_watchers_lock;
   std::vector<ForkWatcher*> _fork_watchers;
index ca4de1b3c1477e05edfe7589d166407070c9f34a..4ab5490f0a3b8263e062b9840fb02a822806ac6e 100644 (file)
@@ -1163,18 +1163,18 @@ struct C_InvalidateCache : public Context {
   void ImageCtx::get_thread_pool_instance(CephContext *cct,
                                           ThreadPool **thread_pool,
                                           ContextWQ **op_work_queue) {
-    ThreadPoolSingleton *thread_pool_singleton;
-    cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
-      thread_pool_singleton, "librbd::thread_pool");
+    auto thread_pool_singleton =
+      &cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
+       "librbd::thread_pool", cct);
     *thread_pool = thread_pool_singleton;
     *op_work_queue = thread_pool_singleton->op_work_queue;
   }
 
   void ImageCtx::get_timer_instance(CephContext *cct, SafeTimer **timer,
                                     Mutex **timer_lock) {
-    SafeTimerSingleton *safe_timer_singleton;
-    cct->lookup_or_create_singleton_object<SafeTimerSingleton>(
-      safe_timer_singleton, "librbd::journal::safe_timer");
+    auto safe_timer_singleton =
+      &cct->lookup_or_create_singleton_object<SafeTimerSingleton>(
+       "librbd::journal::safe_timer", cct);
     *timer = safe_timer_singleton;
     *timer_lock = &safe_timer_singleton->lock;
   }
index 1f294879027e9b5bd63a51e1964b82a3a6693be2..1fcb54a5342dd10e02226a1cfc00545d0c51d592 100644 (file)
@@ -212,12 +212,11 @@ private:
     if (m_work_queue != nullptr) {
       return;
     }
-    ThreadPoolSingleton *thread_pool_singleton;
-    m_cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
-      thread_pool_singleton, "librbd::ImageUpdateWatchers::thread_pool");
+    auto& thread_pool = m_cct->lookup_or_create_singleton_object<
+      ThreadPoolSingleton>("librbd::ImageUpdateWatchers::thread_pool", m_cct);
     m_work_queue = new ContextWQ("librbd::ImageUpdateWatchers::op_work_queue",
                                 m_cct->_conf->get_val<int64_t>("rbd_op_thread_timeout"),
-                                thread_pool_singleton);
+                                &thread_pool);
   }
 
   void destroy_work_queue() {
index 90cd5e08f8111d612a6224bc93bc2e8305ed6846..d4f057ca3dbe494ada2da900f1d1f683eb0c8c44 100644 (file)
@@ -334,9 +334,9 @@ Journal<I>::Journal(I &image_ctx)
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << ": ictx=" << &m_image_ctx << dendl;
 
-  ThreadPoolSingleton *thread_pool_singleton;
-  cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
-    thread_pool_singleton, "librbd::journal::thread_pool");
+  auto thread_pool_singleton =
+    &cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
+      "librbd::journal::thread_pool", cct);
   m_work_queue = new ContextWQ("librbd::journal::work_queue",
                                cct->_conf->get_val<int64_t>("rbd_op_thread_timeout"),
                                thread_pool_singleton);
index 172abef7fb3d4fe6f990116df71055ce25516190..6ca0e19a6a0deb4d3d3c8a84cfc7870ca5c3cd89 100644 (file)
@@ -4,6 +4,7 @@
 #define LIBRBD_TASK_FINISHER_H
 
 #include "include/Context.h"
+#include "common/ceph_context.h"
 #include "common/Finisher.h"
 #include "common/Mutex.h"
 #include "common/Timer.h"
@@ -43,12 +44,12 @@ template <typename Task>
 class TaskFinisher {
 public:
   TaskFinisher(CephContext &cct) : m_cct(cct) {
-    TaskFinisherSingleton *singleton;
-    cct.lookup_or_create_singleton_object<TaskFinisherSingleton>(
-      singleton, "librbd::TaskFinisher::m_safe_timer");
-    m_lock = &singleton->m_lock;
-    m_safe_timer = singleton->m_safe_timer;
-    m_finisher = singleton->m_finisher;
+    auto& singleton =
+      cct.lookup_or_create_singleton_object<TaskFinisherSingleton>(
+       "librbd::TaskFinisher::m_safe_timer", &cct);
+    m_lock = &singleton.m_lock;
+    m_safe_timer = singleton.m_safe_timer;
+    m_finisher = singleton.m_finisher;
   }
 
   void cancel(const Task& task) {
index 103081ce0246417935c47c4afd268e899861f7b5..d42220595738d0a7256390968d1430a7477dba9f 100644 (file)
@@ -259,8 +259,8 @@ AsyncMessenger::AsyncMessenger(CephContext *cct, entity_name_t name,
   else if (type.find("dpdk") != std::string::npos)
     transport_type = "dpdk";
 
-  StackSingleton *single;
-  cct->lookup_or_create_singleton_object<StackSingleton>(single, "AsyncMessenger::NetworkStack::"+transport_type);
+  auto single = &cct->lookup_or_create_singleton_object<StackSingleton>(
+    "AsyncMessenger::NetworkStack::" + transport_type, cct);
   single->ready(transport_type);
   stack = single->stack.get();
   stack->start();
index af5dad703eeb12c2da4bc48fd34cd9bc01c68718..92bea9db19650b985d7e58632ecb7c6addf0af0c 100644 (file)
@@ -189,8 +189,9 @@ void EventCenter::set_owner()
   owner = pthread_self();
   ldout(cct, 2) << __func__ << " idx=" << idx << " owner=" << owner << dendl;
   if (!global_centers) {
-    cct->lookup_or_create_singleton_object<EventCenter::AssociatedCenters>(
-        global_centers, "AsyncMessenger::EventCenter::global_center::"+type);
+    global_centers = &cct->lookup_or_create_singleton_object<
+      EventCenter::AssociatedCenters>(
+       "AsyncMessenger::EventCenter::global_center::" + type);
     assert(global_centers);
     global_centers->centers[idx] = this;
     if (driver->need_wakeup()) {
index 50f2a2b8795be04807169dbf44692e85b23b7137..3a49f0c0b94adb27f3870ea46cb81b47df20326a 100644 (file)
@@ -94,7 +94,7 @@ class EventCenter {
 
   struct AssociatedCenters {
     EventCenter *centers[MAX_EVENTCENTER];
-    AssociatedCenters(CephContext *c) {
+    AssociatedCenters() {
       memset(centers, 0, MAX_EVENTCENTER * sizeof(EventCenter*));
     }
   };
index f3bc77b0950783aaa431f82765419a3ad03f8414..00a7238bea21dfa7d656b4a372866f1a8f411978 100644 (file)
@@ -204,8 +204,9 @@ Mirror::Mirror(CephContext *cct, const std::vector<const char*> &args) :
   m_local(new librados::Rados()),
   m_asok_hook(new MirrorAdminSocketHook(cct, this))
 {
-  cct->lookup_or_create_singleton_object<Threads<librbd::ImageCtx> >(
-    m_threads, "rbd_mirror::threads");
+  m_threads =
+    &(cct->lookup_or_create_singleton_object<Threads<librbd::ImageCtx>>(
+       "rbd_mirror::threads", cct));
   m_service_daemon.reset(new ServiceDaemon<>(m_cct, m_local, m_threads));
 }