From 143c59c30daafcf85823fa93267ea741c3d67019 Mon Sep 17 00:00:00 2001
From: "Adam C. Emerson"
Date: Wed, 7 Feb 2018 13:40:08 -0500
Subject: [PATCH] common: Switch singletons to use immobile_any

This cleans up the interface, makes things more robust by using both
name and type, and does fewer allocations.

Signed-off-by: Adam C. Emerson
---
 src/common/TracepointProvider.h | 11 +++--
 src/common/ceph_context.cc     |  8 +---
 src/common/ceph_context.h      | 81 +++++++++++++++++++--------------
 src/librbd/ImageCtx.cc         | 12 ++---
 src/librbd/ImageState.cc       |  7 ++-
 src/librbd/Journal.cc          |  6 +--
 src/librbd/TaskFinisher.h      | 13 +++---
 src/msg/async/AsyncMessenger.cc |  4 +-
 src/msg/async/Event.cc         |  5 +-
 src/msg/async/Event.h          |  2 +-
 src/tools/rbd_mirror/Mirror.cc |  5 +-
 11 files changed, 86 insertions(+), 68 deletions(-)

diff --git a/src/common/TracepointProvider.h b/src/common/TracepointProvider.h
index 97d3a0f763723..82a0359484ee4 100644
--- a/src/common/TracepointProvider.h
+++ b/src/common/TracepointProvider.h
@@ -10,7 +10,7 @@
 
 struct md_config_t;
 
-class TracepointProvider : public md_config_obs_t, boost::noncopyable {
+class TracepointProvider : public md_config_obs_t {
 public:
   struct Traits {
     const char *library;
@@ -49,11 +49,16 @@ public:
                      const char *config_key);
   ~TracepointProvider() override;
 
+  TracepointProvider(const TracepointProvider&) = delete;
+  TracepointProvider operator =(const TracepointProvider&) = delete;
+  TracepointProvider(TracepointProvider&&) = delete;
+  TracepointProvider operator =(TracepointProvider&&) = delete;
+
   template <const Traits& traits>
   static void initialize(CephContext *cct) {
 #ifdef WITH_LTTNG
-    TypedSingleton<traits> *singleton;
-    cct->lookup_or_create_singleton_object(singleton, traits.library);
+    cct->lookup_or_create_singleton_object<TypedSingleton<traits>>(
+      traits.library, cct);
 #endif
   }
 
diff --git a/src/common/ceph_context.cc b/src/common/ceph_context.cc
index 401dbf53fc055..7005777b2b6a5 100644
--- a/src/common/ceph_context.cc
+++ b/src/common/ceph_context.cc
@@ -639,18 +639,14 @@ CephContext::CephContext(uint32_t module_type_,
   _crypto_aes = CryptoHandler::create(CEPH_CRYPTO_AES);
   _crypto_random.reset(new CryptoRandom());
 
-  MempoolObs *mempool_obs = 0;
-  lookup_or_create_singleton_object(mempool_obs, "mempool_obs");
+  lookup_or_create_singleton_object<MempoolObs>("mempool_obs", this);
 }
 
 CephContext::~CephContext()
 {
+  associated_objs.clear();
   join_service_thread();
 
-  for (map<string, SingletonWrapper*>::iterator it = _associated_objs.begin();
-       it != _associated_objs.end(); ++it)
-    delete it->second;
-
   if (_cct_perf) {
     _perf_counters_collection->remove(_cct_perf);
     delete _cct_perf;
diff --git a/src/common/ceph_context.h b/src/common/ceph_context.h
index 9070cfb72e82e..1bdef28866cff 100644
--- a/src/common/ceph_context.h
+++ b/src/common/ceph_context.h
@@ -15,12 +15,17 @@
 #ifndef CEPH_CEPHCONTEXT_H
 #define CEPH_CEPHCONTEXT_H
 
-#include
+#include
+#include
 #include
 #include
-#include
+#include
+#include
+#include
+#include
+#include
 
-#include
+#include "include/any.h"
 
 #include "common/cmdparse.h"
 #include "common/code_environment.h"
@@ -61,6 +66,11 @@ public:
               enum code_environment_t code_env=CODE_ENVIRONMENT_UTILITY,
               int init_flags_ = 0);
 
+  CephContext(const CephContext&) = delete;
+  CephContext& operator =(const CephContext&) = delete;
+  CephContext(CephContext&&) = delete;
+  CephContext& operator =(CephContext&&) = delete;
+
   // ref count!
private:
   ~CephContext();
@@ -129,20 +139,28 @@ public:
   void do_command(std::string command, cmdmap_t& cmdmap,
                   std::string format, ceph::bufferlist *out);
 
-  template <typename T>
-  void lookup_or_create_singleton_object(T*& p, const std::string &name) {
-    std::lock_guard lg(_associated_objs_lock);
-
-    if (!_associated_objs.count(name)) {
-      p = new T(this);
-      _associated_objs[name] = new TypedSingletonWrapper<T>(p);
-    } else {
-      TypedSingletonWrapper<T> *wrapper =
-        dynamic_cast<TypedSingletonWrapper<T> *>(_associated_objs[name]);
-      assert(wrapper != NULL);
-      p = wrapper->singleton;
+  static constexpr std::size_t largest_singleton = sizeof(void*) * 72;
+
+  template<typename T, typename... Args>
+  T& lookup_or_create_singleton_object(std::string_view name,
+                                       Args&&... args) {
+    static_assert(sizeof(T) <= largest_singleton,
+                  "Please increase largest singleton.");
+    std::lock_guard lg(associated_objs_lock);
+    std::type_index type = typeid(T);
+
+    auto i = associated_objs.find(std::make_pair(name, type));
+    if (i == associated_objs.cend()) {
+      i = associated_objs.emplace_hint(
+        i,
+        std::piecewise_construct,
+        std::forward_as_tuple(name, type),
+        std::forward_as_tuple(std::in_place_type<T>,
+                              std::forward<Args>(args)...));
     }
+    return ceph::any_cast<T&>(i->second);
   }
+
   /**
    * get a crypto handler
    */
@@ -206,23 +224,7 @@ public:
   }
 
 private:
-  struct SingletonWrapper : boost::noncopyable {
-    virtual ~SingletonWrapper() {}
-  };
-
-  template <typename T>
-  struct TypedSingletonWrapper : public SingletonWrapper {
-    TypedSingletonWrapper(T *p) : singleton(p) {
-    }
-    ~TypedSingletonWrapper() override {
-      delete singleton;
-    }
-
-    T *singleton;
-  };
-
-  CephContext(const CephContext &rhs);
-  CephContext &operator=(const CephContext &rhs);
 
   /* Stop and join the Ceph Context's service thread */
   void join_service_thread();
@@ -260,8 +262,21 @@
 
   ceph::HeartbeatMap *_heartbeat_map;
 
-  ceph::spinlock _associated_objs_lock;
-  std::map<std::string, SingletonWrapper*> _associated_objs;
+  ceph::spinlock associated_objs_lock;
+
+  struct associated_objs_cmp {
+    using is_transparent = std::true_type;
+    template<typename T, typename U>
+    bool operator ()(const std::pair<T, std::type_index>& l,
+                     const std::pair<U, std::type_index>& r) const noexcept {
+      return ((l.first < r.first) ||
+              (l.first == r.first && l.second < r.second));
+    }
+  };
+
+  std::map<std::pair<std::string, std::type_index>,
+           ceph::immobile_any<largest_singleton>,
+           associated_objs_cmp> associated_objs;
 
   ceph::spinlock _fork_watchers_lock;
   std::vector<ForkWatcher*> _fork_watchers;
diff --git a/src/librbd/ImageCtx.cc b/src/librbd/ImageCtx.cc
index ca4de1b3c1477..4ab5490f0a3b8 100644
--- a/src/librbd/ImageCtx.cc
+++ b/src/librbd/ImageCtx.cc
@@ -1163,18 +1163,18 @@ struct C_InvalidateCache : public Context {
 void ImageCtx::get_thread_pool_instance(CephContext *cct,
                                         ThreadPool **thread_pool,
                                         ContextWQ **op_work_queue) {
-  ThreadPoolSingleton *thread_pool_singleton;
-  cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
-    thread_pool_singleton, "librbd::thread_pool");
+  auto thread_pool_singleton =
+    &cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
+      "librbd::thread_pool", cct);
   *thread_pool = thread_pool_singleton;
   *op_work_queue = thread_pool_singleton->op_work_queue;
 }
 
 void ImageCtx::get_timer_instance(CephContext *cct, SafeTimer **timer,
                                   Mutex **timer_lock) {
-  SafeTimerSingleton *safe_timer_singleton;
-  cct->lookup_or_create_singleton_object<SafeTimerSingleton>(
-    safe_timer_singleton, "librbd::journal::safe_timer");
+  auto safe_timer_singleton =
+    &cct->lookup_or_create_singleton_object<SafeTimerSingleton>(
+      "librbd::journal::safe_timer", cct);
   *timer = safe_timer_singleton;
   *timer_lock = &safe_timer_singleton->lock;
 }
diff --git a/src/librbd/ImageState.cc b/src/librbd/ImageState.cc
index 1f294879027e9..1fcb54a5342dd 100644
--- a/src/librbd/ImageState.cc
+++ b/src/librbd/ImageState.cc
@@ -212,12 +212,11 @@ private:
     if (m_work_queue != nullptr) {
       return;
     }
-    ThreadPoolSingleton *thread_pool_singleton;
-    m_cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
-      thread_pool_singleton, "librbd::ImageUpdateWatchers::thread_pool");
+    auto& thread_pool = m_cct->lookup_or_create_singleton_object<
+      ThreadPoolSingleton>("librbd::ImageUpdateWatchers::thread_pool", m_cct);
     m_work_queue = new ContextWQ("librbd::ImageUpdateWatchers::op_work_queue",
                                  m_cct->_conf->get_val<int64_t>("rbd_op_thread_timeout"),
-                                 thread_pool_singleton);
+                                 &thread_pool);
   }
 
   void destroy_work_queue() {
diff --git a/src/librbd/Journal.cc b/src/librbd/Journal.cc
index 90cd5e08f8111..d4f057ca3dbe4 100644
--- a/src/librbd/Journal.cc
+++ b/src/librbd/Journal.cc
@@ -334,9 +334,9 @@ Journal<I>::Journal(I &image_ctx)
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << ": ictx=" << &m_image_ctx << dendl;
 
-  ThreadPoolSingleton *thread_pool_singleton;
-  cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
-    thread_pool_singleton, "librbd::journal::thread_pool");
+  auto thread_pool_singleton =
+    &cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
+      "librbd::journal::thread_pool", cct);
   m_work_queue = new ContextWQ("librbd::journal::work_queue",
                                cct->_conf->get_val<int64_t>("rbd_op_thread_timeout"),
                                thread_pool_singleton);
diff --git a/src/librbd/TaskFinisher.h b/src/librbd/TaskFinisher.h
index 172abef7fb3d4..6ca0e19a6a0de 100644
--- a/src/librbd/TaskFinisher.h
+++ b/src/librbd/TaskFinisher.h
@@ -4,6 +4,7 @@
 #define LIBRBD_TASK_FINISHER_H
 
 #include "include/Context.h"
+#include "common/ceph_context.h"
 #include "common/Finisher.h"
 #include "common/Mutex.h"
 #include "common/Timer.h"
@@ -43,12 +44,12 @@ template <typename Task>
 class TaskFinisher {
 public:
   TaskFinisher(CephContext &cct) : m_cct(cct) {
-    TaskFinisherSingleton *singleton;
-    cct.lookup_or_create_singleton_object<TaskFinisherSingleton>(
-      singleton, "librbd::TaskFinisher::m_safe_timer");
-    m_lock = &singleton->m_lock;
-    m_safe_timer = singleton->m_safe_timer;
-    m_finisher = singleton->m_finisher;
+    auto& singleton =
+      cct.lookup_or_create_singleton_object<TaskFinisherSingleton>(
+        "librbd::TaskFinisher::m_safe_timer", &cct);
+    m_lock = &singleton.m_lock;
+    m_safe_timer = singleton.m_safe_timer;
+    m_finisher = singleton.m_finisher;
   }
 
   void cancel(const Task& task) {
diff --git a/src/msg/async/AsyncMessenger.cc b/src/msg/async/AsyncMessenger.cc
index 103081ce02464..d42220595738d 100644
--- a/src/msg/async/AsyncMessenger.cc
+++ b/src/msg/async/AsyncMessenger.cc
@@ -259,8 +259,8 @@ AsyncMessenger::AsyncMessenger(CephContext *cct, entity_name_t name,
   else if (type.find("dpdk") != std::string::npos)
     transport_type = "dpdk";
 
-  StackSingleton *single;
-  cct->lookup_or_create_singleton_object<StackSingleton>(single, "AsyncMessenger::NetworkStack::"+transport_type);
+  auto single = &cct->lookup_or_create_singleton_object<StackSingleton>(
+    "AsyncMessenger::NetworkStack::" + transport_type, cct);
   single->ready(transport_type);
   stack = single->stack.get();
   stack->start();
diff --git a/src/msg/async/Event.cc b/src/msg/async/Event.cc
index af5dad703eeb1..92bea9db19650 100644
--- a/src/msg/async/Event.cc
+++ b/src/msg/async/Event.cc
@@ -189,8 +189,9 @@ void EventCenter::set_owner()
   owner = pthread_self();
   ldout(cct, 2) << __func__ << " idx=" << idx << " owner=" << owner << dendl;
   if (!global_centers) {
-    cct->lookup_or_create_singleton_object<EventCenter::AssociatedCenters>(
-      global_centers, "AsyncMessenger::EventCenter::global_center::"+type);
+    global_centers = &cct->lookup_or_create_singleton_object<
+      EventCenter::AssociatedCenters>(
+        "AsyncMessenger::EventCenter::global_center::" + type);
     assert(global_centers);
     global_centers->centers[idx] = this;
     if (driver->need_wakeup()) {
diff --git a/src/msg/async/Event.h b/src/msg/async/Event.h
index 50f2a2b8795be..3a49f0c0b94ad 100644
--- a/src/msg/async/Event.h
+++ b/src/msg/async/Event.h
@@ -94,7 +94,7 @@ class EventCenter {
   struct AssociatedCenters {
     EventCenter *centers[MAX_EVENTCENTER];
 
-    AssociatedCenters(CephContext *c) {
+    AssociatedCenters() {
      memset(centers, 0, MAX_EVENTCENTER * sizeof(EventCenter*));
     }
   };
diff --git a/src/tools/rbd_mirror/Mirror.cc b/src/tools/rbd_mirror/Mirror.cc
index f3bc77b095078..00a7238bea21d 100644
--- a/src/tools/rbd_mirror/Mirror.cc
+++ b/src/tools/rbd_mirror/Mirror.cc
@@ -204,8 +204,9 @@ Mirror::Mirror(CephContext *cct, const std::vector<const char*> &args) :
   m_local(new librados::Rados()),
   m_asok_hook(new MirrorAdminSocketHook(cct, this))
 {
-  cct->lookup_or_create_singleton_object<Threads<librbd::ImageCtx> >(
-    m_threads, "rbd_mirror::threads");
+  m_threads =
+    &(cct->lookup_or_create_singleton_object<Threads<librbd::ImageCtx>>(
+      "rbd_mirror::threads", cct));
   m_service_daemon.reset(new ServiceDaemon<>(m_cct, m_local, m_threads));
 }
 
-- 
2.39.5
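
The keyed lookup that the new lookup_or_create_singleton_object() introduces can be
illustrated outside of Ceph. The standalone sketch below is not Ceph code: std::any
stands in for ceph::immobile_any (so, unlike the real thing, it may heap-allocate and
requires copy-constructible singletons), and Registry, name_type_cmp and ThreadPools
are made-up names. What it does share with the ceph_context.h change above is the map
keyed by (name, std::type_index), the transparent comparator that lets find() take a
std::string_view probe without building a std::string, and the piecewise emplace that
constructs the value in place on first use.

// Illustrative sketch only -- not part of this patch. std::any stands in
// for ceph::immobile_any, and all names here are hypothetical.
#include <any>
#include <iostream>
#include <map>
#include <string>
#include <string_view>
#include <type_traits>
#include <typeindex>
#include <typeinfo>
#include <utility>

// Transparent comparator: a std::pair<std::string_view, std::type_index>
// probe can be compared against std::pair<std::string, std::type_index>
// keys, so lookups do not allocate a temporary std::string.
struct name_type_cmp {
  using is_transparent = std::true_type;
  template<typename L, typename R>
  bool operator()(const std::pair<L, std::type_index>& l,
                  const std::pair<R, std::type_index>& r) const noexcept {
    return l.first < r.first || (l.first == r.first && l.second < r.second);
  }
};

struct Registry {
  template<typename T, typename... Args>
  T& lookup_or_create(std::string_view name, Args&&... args) {
    std::type_index type = typeid(T);
    auto i = objs.find(std::make_pair(name, type));   // heterogeneous lookup
    if (i == objs.end()) {
      // First use: build the key and the value in place. Only now is the
      // std::string for the key actually allocated.
      i = objs.emplace_hint(
        i, std::piecewise_construct,
        std::forward_as_tuple(name, type),
        std::forward_as_tuple(std::in_place_type<T>,
                              std::forward<Args>(args)...));
    }
    return std::any_cast<T&>(i->second);
  }

  std::map<std::pair<std::string, std::type_index>, std::any, name_type_cmp> objs;
};

// Two callers asking for the same (name, type) share one instance; the same
// name with a different type would get its own slot.
struct ThreadPools { explicit ThreadPools(int n) : n(n) {} int n; };

int main() {
  Registry reg;
  auto& a = reg.lookup_or_create<ThreadPools>("librbd::thread_pool", 4);
  auto& b = reg.lookup_or_create<ThreadPools>("librbd::thread_pool", 8);
  std::cout << (&a == &b) << " " << a.n << "\n";  // prints "1 4"
}

Returning T& by reference and keying on the type as well as the name is what removes
the dynamic_cast and NULL-check dance of the old interface: a mismatched type simply
resolves to a different map entry instead of a wrapper of the wrong dynamic type.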
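
include/any.h itself is not shown in this patch. As a rough idea of what an "immobile
any" buys over std::any, here is a hypothetical fixed-capacity holder: the value is
constructed in place inside inline storage whose size is fixed at compile time (compare
largest_singleton above), and the holder is neither copyable nor movable, so the
contained singleton never relocates and never touches the heap. fixed_any and Demo are
invented names; this is a sketch of the concept, not the ceph::immobile_any
implementation.

// Conceptual sketch only -- not the implementation in include/any.h.
#include <cstddef>
#include <iostream>
#include <new>
#include <utility>

template<std::size_t Capacity>
class fixed_any {
  alignas(std::max_align_t) unsigned char storage[Capacity];  // inline buffer
  void (*dtor)(void*) = nullptr;                              // type-erased destructor

public:
  template<typename T, typename... Args>
  explicit fixed_any(std::in_place_type_t<T>, Args&&... args) {
    static_assert(sizeof(T) <= Capacity, "increase Capacity");
    static_assert(alignof(T) <= alignof(std::max_align_t),
                  "over-aligned types are not supported by this sketch");
    ::new (static_cast<void*>(storage)) T(std::forward<Args>(args)...);
    dtor = [](void* p) { static_cast<T*>(p)->~T(); };
  }

  // Immobile: the stored object is constructed once and never copied or
  // moved, which is what lets CephContext hand out stable references.
  fixed_any(const fixed_any&) = delete;
  fixed_any& operator=(const fixed_any&) = delete;
  fixed_any(fixed_any&&) = delete;
  fixed_any& operator=(fixed_any&&) = delete;

  ~fixed_any() { dtor(storage); }

  // Unchecked access for brevity; the real any_cast verifies the stored type.
  template<typename T>
  T& ref() noexcept {
    return *std::launder(reinterpret_cast<T*>(storage));
  }
};

struct Demo {
  explicit Demo(int v) : v(v) {}
  int v;
};

int main() {
  fixed_any<sizeof(void*) * 72> a(std::in_place_type<Demo>, 42);
  std::cout << a.ref<Demo>().v << "\n";  // prints 42
}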