librbd: change init method of Parent cache to asynchronous
author shangdehao1 <dehao.shang@intel.com>
Thu, 13 Jun 2019 01:08:18 +0000 (09:08 +0800)
committer Jason Dillaman <dillaman@redhat.com>
Mon, 24 Jun 2019 21:36:54 +0000 (17:36 -0400)
Signed-off-by: Dehao Shang <dehao.shang@intel.com>
src/librbd/cache/ParentCacheObjectDispatch.cc
src/librbd/cache/ParentCacheObjectDispatch.h
src/librbd/image/OpenRequest.cc

src/librbd/cache/ParentCacheObjectDispatch.cc
index 238cf36f13cc21817e7c41ee895c4722a9341b9a..a79168e2acf0f381c4c5136427712da785e234fb 100644
@@ -28,7 +28,7 @@ namespace cache {
 template <typename I>
 ParentCacheObjectDispatch<I>::ParentCacheObjectDispatch(
     I* image_ctx) : m_image_ctx(image_ctx), m_cache_client(nullptr),
-    m_initialized(false), m_re_connecting(false),
+    m_initialized(false), m_connecting(false),
     m_lock("librbd::cache::ParentCacheObjectDispatch::m_lock") {
   std::string controller_path =
     ((CephContext*)(m_image_ctx->cct))->_conf.get_val<std::string>("immutable_object_cache_sock");
@@ -50,13 +50,14 @@ void ParentCacheObjectDispatch<I>::init() {
     return;
   }
 
-  C_SaferCond* cond = new C_SaferCond();
-  Context* create_session_ctx = new FunctionContext([cond](int ret) {
-    cond->complete(0);
+  ceph_assert(m_connecting.load() == false);
+  m_connecting.store(true);
+  Context* create_session_ctx = new FunctionContext([this](int ret) {
+    Mutex::Locker locker(m_lock);
+    m_connecting.store(false);
   });
 
   create_cache_session(create_session_ctx, false);
-  cond->wait();
 
   m_image_ctx->io_object_dispatcher->register_object_dispatch(this);
   m_initialized = true;
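
The hunk above replaces the blocking C_SaferCond wait with a fire-and-forget completion: init() registers the dispatcher immediately and the session connect finishes in the background, with m_connecting cleared from the completion callback. A minimal standalone sketch of that before/after shape, using plain std primitives instead of Ceph's FunctionContext/C_SaferCond and a hypothetical create_cache_session() helper (names here are illustrative, not the librbd API):

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <functional>
    #include <mutex>
    #include <thread>

    using Context = std::function<void(int)>;

    // Hypothetical stand-in for session setup: the connect runs elsewhere and
    // the supplied callback is completed with its result later.
    void create_cache_session(Context on_finish) {
      std::thread([cb = std::move(on_finish)]() { cb(0); }).detach();
    }

    struct ParentCacheSketch {
      std::atomic<bool> connecting{false};
      bool initialized = false;

      // Old shape: init() blocks until the session callback fires.
      void init_blocking() {
        std::mutex m;
        std::condition_variable cv;
        bool done = false;
        create_cache_session([&](int) {
          std::lock_guard<std::mutex> l(m);
          done = true;
          cv.notify_one();
        });
        std::unique_lock<std::mutex> l(m);
        cv.wait(l, [&] { return done; });   // image open stalls here
        initialized = true;
      }

      // New shape: mark "connecting", register right away, and clear the flag
      // from the completion callback instead of waiting for it.
      void init_async() {
        connecting.store(true);
        create_cache_session([this](int) { connecting.store(false); });
        initialized = true;                 // no wait; reads fall back while connecting
      }
    };

    int main() {
      ParentCacheSketch cache;
      cache.init_async();
      std::this_thread::sleep_for(std::chrono::milliseconds(50));  // let the callback land
      return 0;
    }
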
@@ -81,18 +82,18 @@ bool ParentCacheObjectDispatch<I>::read(
   if (!m_cache_client->is_session_work()) {
     {
       Mutex::Locker locker(m_lock);
-      if (m_re_connecting.load()) {
+      if (m_connecting.load()) {
         ldout(cct, 5) << "Parent cache is re-connecting RO daemon, "
                       << "dispatch current request to lower object layer " << dendl;
         return false;
       }
-      m_re_connecting.store(true);
+      m_connecting.store(true);
     }
 
-    ceph_assert(m_re_connecting.load());
+    ceph_assert(m_connecting.load());
 
     Context* on_finish = new FunctionContext([this](int ret) {
-      m_re_connecting.store(false);
+      m_connecting.store(false);
     });
     create_cache_session(on_finish, true);
 
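In read(), the same flag now doubles as a reconnect guard: the check-and-set happens under m_lock, so only the first read after the session drops kicks off a reconnect, and every read falls back to the lower object layer until the session is working again. A standalone sketch of that guard, with a hypothetical start_reconnect() standing in for create_cache_session(ctx, true) (again illustrative names, not the dispatcher API):

    #include <atomic>
    #include <mutex>

    class ReconnectGuardSketch {
    public:
      // Returns false when the read should be dispatched to the lower object layer.
      bool read_while_session_down() {
        {
          std::lock_guard<std::mutex> locker(m_lock);
          if (m_connecting.load()) {
            return false;               // a reconnect is already in flight
          }
          m_connecting.store(true);     // this caller owns the reconnect
        }
        start_reconnect();
        return false;                   // bypass the cache until the session is back
      }

    private:
      void start_reconnect() {
        // Placeholder: really an asynchronous session re-creation whose
        // completion callback performs m_connecting.store(false).
        m_connecting.store(false);
      }

      std::atomic<bool> m_connecting{false};
      std::mutex m_lock;
    };

    int main() {
      ReconnectGuardSketch guard;
      return guard.read_while_session_down() ? 1 : 0;
    }
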
src/librbd/cache/ParentCacheObjectDispatch.h
index 012dcd8b88f31648536a7667279c9a59a039c92d..c41edef62394b8890cc8f6cab630479ec62e4d82 100644
@@ -133,7 +133,7 @@ private:
   ImageCtxT* m_image_ctx;
   CacheClient *m_cache_client;
   bool m_initialized;
-  std::atomic<bool> m_re_connecting;
+  std::atomic<bool> m_connecting;
   Mutex m_lock;
 };
 
src/librbd/image/OpenRequest.cc
index cba70212302fb74745d127c665987a70e5c58688..81b9a03b5fa53e85ada25cf2293f45e561632a1b 100644
@@ -522,13 +522,14 @@ Context *OpenRequest<I>::send_init_cache(int *result) {
   CephContext *cct = m_image_ctx->cct;
 
   if (!m_image_ctx->cache || m_image_ctx->child != nullptr) {
-     // enable Shared Read-only cache for parent image
+    // enable Parent cache for parent image
     bool parent_cache_enabled = m_image_ctx->config.template get_val<bool>(
       "rbd_parent_cache_enabled");
+
     if (m_image_ctx->child != nullptr && parent_cache_enabled ) {
       ldout(cct, 10) << this << " " << "setting up parent cache"<< dendl;
-      auto sro_cache = cache::ParentCacheObjectDispatch<I>::create(m_image_ctx);
-      sro_cache->init();
+      auto parent_cache = cache::ParentCacheObjectDispatch<I>::create(m_image_ctx);
+      parent_cache->init();
     }
     return send_register_watch(result);
   }
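
On the caller side, the effect of the sketches above is that init() returns immediately: send_init_cache() no longer stalls the image-open state machine on the immutable-object-cache daemon socket, and if the daemon is unreachable, reads simply keep falling through to the lower object layer until a session is established.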