git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
librbd: directly inform IO work queue when locks are required
authorJason Dillaman <dillaman@redhat.com>
Thu, 31 Aug 2017 02:08:15 +0000 (22:08 -0400)
committerJason Dillaman <dillaman@redhat.com>
Thu, 31 Aug 2017 14:00:32 +0000 (10:00 -0400)
Due to lock dependency issues between librbd locks and the thread
pool lock, it's impossible to directly determine if the lock is
required within the _void_dequeue method. Therefore, this state needs
to be internally tracked by the work queue.

(derived from commit 4a525671b3541a0a208dd039ac96f42bc8fca2cc)

Signed-off-by: Jason Dillaman <dillaman@redhat.com>
src/librbd/AioImageRequestWQ.cc
src/librbd/AioImageRequestWQ.h
src/librbd/ExclusiveLock.cc
src/librbd/exclusive_lock/ReleaseRequest.cc
src/librbd/image/RefreshRequest.cc
src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc
src/test/librbd/image/test_mock_RefreshRequest.cc
src/test/librbd/mock/MockAioImageRequestWQ.h
src/test/librbd/test_mock_ExclusiveLock.cc

index 3704869d0f093dfec0b508e011456c0da273ae07..93bbe6ba37dc4078435e003b57861b298490b1a6 100644 (file)
@@ -292,28 +292,36 @@ void AioImageRequestWQ<I>::unblock_writes() {
 }
 
 template <typename I>
-void AioImageRequestWQ<I>::set_require_lock_on_read() {
+void AioImageRequestWQ<I>::set_require_lock(AioDirection aio_direction,
+                                           bool enabled) {
   CephContext *cct = m_image_ctx.cct;
-  ldout(cct, 20) << __func__ << dendl;
-
-  RWLock::WLocker locker(m_lock);
-  m_require_lock_on_read = true;
-}
-
-template <typename I>
-void AioImageRequestWQ<I>::clear_require_lock_on_read() {
-  CephContext *cct = m_image_ctx.cct;
-  ldout(cct, 20) << __func__ << dendl;
+  ldout(cct, 20) << dendl;
 
+  bool wake_up = false;
   {
-    RWLock::WLocker locker(m_lock);
-    if (!m_require_lock_on_read) {
-      return;
+    switch (aio_direction) {
+    case AIO_DIRECTION_READ:
+      wake_up = (enabled != m_require_lock_on_read);
+      m_require_lock_on_read = enabled;
+      break;
+    case AIO_DIRECTION_WRITE:
+      wake_up = (enabled != m_require_lock_on_write);
+      m_require_lock_on_write = enabled;
+      break;
+    case AIO_DIRECTION_BOTH:
+      wake_up = (enabled != m_require_lock_on_read ||
+                 enabled != m_require_lock_on_write);
+      m_require_lock_on_read = enabled;
+      m_require_lock_on_write = enabled;
+      break;
     }
+  }
 
-    m_require_lock_on_read = false;
+  // wake up the thread pool whenever the state changes so that
+  // we can re-request the lock if required
+  if (wake_up) {
+    this->signal();
   }
-  this->signal();
 }
 
 template <typename I>
index 0b7481c202eb76779c44e55c70d2c260cd279718..c21a23de52bf67364e6809a5c46719d94f31c759 100644 (file)
@@ -16,6 +16,12 @@ class AioCompletion;
 template <typename> class AioImageRequest;
 class ImageCtx;
 
+enum AioDirection {
+  AIO_DIRECTION_READ,
+  AIO_DIRECTION_WRITE,
+  AIO_DIRECTION_BOTH
+};
+
 template <typename ImageCtxT = librbd::ImageCtx>
 class AioImageRequestWQ
   : protected ThreadPool::PointerWQ<AioImageRequest<ImageCtxT> > {
@@ -52,8 +58,7 @@ public:
   void block_writes(Context *on_blocked);
   void unblock_writes();
 
-  void set_require_lock_on_read();
-  void clear_require_lock_on_read();
+  void set_require_lock(AioDirection aio_direction, bool enabled);
 
 protected:
   virtual void *_void_dequeue();
@@ -91,6 +96,7 @@ private:
   Contexts m_write_blocker_contexts;
   uint32_t m_write_blockers = 0;
   bool m_require_lock_on_read = false;
+  bool m_require_lock_on_write = false;
   atomic_t m_in_flight_writes {0};
   atomic_t m_queued_reads {0};
   atomic_t m_queued_writes {0};
index 43ed31b6c295eacf57901dccc81ce7839d363503..50cd61eb6d7082023b0a3dc284f9d30d50b89cfc 100644 (file)
@@ -121,10 +121,13 @@ void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
     m_state = STATE_INITIALIZING;
   }
 
-  m_image_ctx.aio_work_queue->block_writes(new C_InitComplete(this, on_init));
-  if ((features & RBD_FEATURE_JOURNALING) != 0) {
-    m_image_ctx.aio_work_queue->set_require_lock_on_read();
+  if (m_image_ctx.clone_copy_on_read ||
+      (features & RBD_FEATURE_JOURNALING) != 0) {
+    m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, true);
+  } else {
+    m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_WRITE, true);
   }
+  m_image_ctx.aio_work_queue->block_writes(new C_InitComplete(this, on_init));
 }
 
 template <typename I>
@@ -513,7 +516,7 @@ void ExclusiveLock<I>::handle_acquire_lock(int r) {
 
   if (next_state == STATE_LOCKED) {
     m_image_ctx.image_watcher->notify_acquired_lock();
-    m_image_ctx.aio_work_queue->clear_require_lock_on_read();
+    m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, false);
     m_image_ctx.aio_work_queue->unblock_writes();
   }
 
@@ -732,7 +735,7 @@ void ExclusiveLock<I>::handle_shutdown_released(int r) {
     lderr(cct) << "failed to shut down exclusive lock: " << cpp_strerror(r)
                << dendl;
   } else {
-    m_image_ctx.aio_work_queue->clear_require_lock_on_read();
+    m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, false);
     m_image_ctx.aio_work_queue->unblock_writes();
   }
 
@@ -750,7 +753,7 @@ void ExclusiveLock<I>::handle_shutdown(int r) {
     m_image_ctx.exclusive_lock = nullptr;
   }
 
-  m_image_ctx.aio_work_queue->clear_require_lock_on_read();
+  m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, false);
   m_image_ctx.aio_work_queue->unblock_writes();
   m_image_ctx.image_watcher->flush(util::create_context_callback<
     ExclusiveLock<I>, &ExclusiveLock<I>::complete_shutdown>(this));
index fbe2854fbf491ab03e2d4a41c44d39a0d819892c..552ca065816d74e13734bc46de0a037bf400d648 100644 (file)
@@ -116,8 +116,11 @@ void ReleaseRequest<I>::send_block_writes() {
 
   {
     RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-    if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
-      m_image_ctx.aio_work_queue->set_require_lock_on_read();
+    if (m_image_ctx.clone_copy_on_read ||
+        m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
+      m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, true);
+    } else {
+      m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_WRITE, true);
     }
     m_image_ctx.aio_work_queue->block_writes(ctx);
   }
index 3fcedeab33a90c5ca2d41c954d5cd294e6679505..ffaa5790f2377ff94dcd9a942ded8e12cb5cecf2 100644 (file)
@@ -499,7 +499,7 @@ void RefreshRequest<I>::send_v2_open_journal() {
         !journal_disabled_by_policy &&
         m_image_ctx.exclusive_lock != nullptr &&
         m_image_ctx.journal == nullptr) {
-      m_image_ctx.aio_work_queue->set_require_lock_on_read();
+      m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, true);
     }
     send_v2_block_writes();
     return;
@@ -929,7 +929,6 @@ void RefreshRequest<I>::apply() {
       // object map and journaling
       assert(m_exclusive_lock == nullptr);
       m_exclusive_lock = m_image_ctx.exclusive_lock;
-      m_image_ctx.aio_work_queue->clear_require_lock_on_read();
     } else {
       if (m_exclusive_lock != nullptr) {
         assert(m_image_ctx.exclusive_lock == nullptr);
@@ -937,8 +936,8 @@ void RefreshRequest<I>::apply() {
       }
       if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
                                      m_image_ctx.snap_lock)) {
-        if (m_image_ctx.journal != nullptr) {
-          m_image_ctx.aio_work_queue->clear_require_lock_on_read();
+        if (!m_image_ctx.clone_copy_on_read && m_image_ctx.journal != nullptr) {
+          m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_READ, false);
         }
         std::swap(m_journal, m_image_ctx.journal);
       } else if (m_journal != nullptr) {
@@ -949,10 +948,6 @@ void RefreshRequest<I>::apply() {
           m_object_map != nullptr) {
         std::swap(m_object_map, m_image_ctx.object_map);
       }
-      if (m_image_ctx.clone_copy_on_read &&
-          m_image_ctx.aio_work_queue->is_lock_required()) {
-        m_image_ctx.aio_work_queue->set_require_lock_on_read();
-      }
     }
   }
 }
index f15c2ff29b138b09b882a37ce356ffe0ab0d5e6a..73bc8d4e851023e33194e9a56fdb2dd9605c447b 100644 (file)
@@ -51,15 +51,20 @@ public:
                   .WillOnce(Return(enabled));
   }
 
-  void expect_set_require_lock_on_read(MockImageCtx &mock_image_ctx) {
-    EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock_on_read());
+  void expect_set_require_lock(MockImageCtx &mock_image_ctx,
+                               AioDirection direction, bool enabled) {
+    EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock(direction,
+                                                                 enabled));
   }
 
   void expect_block_writes(MockImageCtx &mock_image_ctx, int r) {
     expect_test_features(mock_image_ctx, RBD_FEATURE_JOURNALING,
                          ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0));
-    if ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
-      expect_set_require_lock_on_read(mock_image_ctx);
+    if (mock_image_ctx.clone_copy_on_read ||
+        (mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
+      expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_BOTH, true);
+    } else {
+      expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_WRITE, true);
     }
     EXPECT_CALL(*mock_image_ctx.aio_work_queue, block_writes(_))
                   .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
index 2938a9fef923e0420c6d66a7d1e81829d3f2f821..5f8b813ee10b82b451a8d83971b5a8f35e139d25 100644 (file)
@@ -97,16 +97,10 @@ public:
   typedef RefreshRequest<MockRefreshImageCtx> MockRefreshRequest;
   typedef RefreshParentRequest<MockRefreshImageCtx> MockRefreshParentRequest;
 
-  void expect_is_lock_required(MockRefreshImageCtx &mock_image_ctx, bool require_lock) {
-    EXPECT_CALL(*mock_image_ctx.aio_work_queue, is_lock_required()).WillOnce(Return(require_lock));
-  }
-
-  void expect_set_require_lock_on_read(MockRefreshImageCtx &mock_image_ctx) {
-    EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock_on_read());
-  }
-
-  void expect_clear_require_lock_on_read(MockRefreshImageCtx &mock_image_ctx) {
-    EXPECT_CALL(*mock_image_ctx.aio_work_queue, clear_require_lock_on_read());
+  void expect_set_require_lock(MockRefreshImageCtx &mock_image_ctx,
+                               AioDirection direction, bool enabled) {
+    EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock(direction,
+                                                                 enabled));
   }
 
   void expect_v1_read_header(MockRefreshImageCtx &mock_image_ctx, int r) {
@@ -590,7 +584,6 @@ TEST_F(TestMockImageRefreshRequest, DisableExclusiveLock) {
   expect_get_mutable_metadata(mock_image_ctx, 0);
   expect_get_flags(mock_image_ctx, 0);
   expect_refresh_parent_is_required(mock_refresh_parent_request, false);
-  expect_clear_require_lock_on_read(mock_image_ctx);
   expect_shut_down_exclusive_lock(mock_image_ctx, *mock_exclusive_lock, 0);
 
   C_SaferCond ctx;
@@ -687,44 +680,6 @@ TEST_F(TestMockImageRefreshRequest, JournalDisabledByPolicy) {
   ASSERT_EQ(0, ctx.wait());
 }
 
-TEST_F(TestMockImageRefreshRequest, ExclusiveLockWithCoR) {
-  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
-
-  REQUIRE(!is_feature_enabled(RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF | RBD_FEATURE_JOURNALING))
-
-  std::string val;
-  ASSERT_EQ(0, _rados.conf_get("rbd_clone_copy_on_read", val));
-  if (val == "false") {
-    std::cout << "SKIPPING due to disabled rbd_copy_on_read" << std::endl;
-    return;
-  }
-
-  librbd::ImageCtx *ictx;
-  ASSERT_EQ(0, open_image(m_image_name, &ictx));
-
-  MockRefreshImageCtx mock_image_ctx(*ictx);
-  MockRefreshParentRequest mock_refresh_parent_request;
-
-  MockExclusiveLock mock_exclusive_lock;
-  mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
-
-  expect_op_work_queue(mock_image_ctx);
-  expect_test_features(mock_image_ctx);
-
-  InSequence seq;
-  expect_get_mutable_metadata(mock_image_ctx, 0);
-  expect_get_flags(mock_image_ctx, 0);
-  expect_refresh_parent_is_required(mock_refresh_parent_request, false);
-  expect_is_lock_required(mock_image_ctx, true);
-  expect_set_require_lock_on_read(mock_image_ctx);
-
-  C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
-  req->send();
-
-  ASSERT_EQ(0, ctx.wait());
-}
-
 TEST_F(TestMockImageRefreshRequest, EnableJournalWithExclusiveLock) {
   REQUIRE_FEATURE(RBD_FEATURE_JOURNALING);
 
@@ -798,7 +753,7 @@ TEST_F(TestMockImageRefreshRequest, EnableJournalWithoutExclusiveLock) {
   expect_get_mutable_metadata(mock_image_ctx, 0);
   expect_get_flags(mock_image_ctx, 0);
   expect_refresh_parent_is_required(mock_refresh_parent_request, false);
-  expect_set_require_lock_on_read(mock_image_ctx);
+  expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_BOTH, true);
 
   C_SaferCond ctx;
   MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
@@ -840,7 +795,9 @@ TEST_F(TestMockImageRefreshRequest, DisableJournal) {
   expect_get_flags(mock_image_ctx, 0);
   expect_refresh_parent_is_required(mock_refresh_parent_request, false);
   expect_block_writes(mock_image_ctx, 0);
-  expect_clear_require_lock_on_read(mock_image_ctx);
+  if (!mock_image_ctx.clone_copy_on_read) {
+    expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_READ, false);
+  }
   expect_close_journal(mock_image_ctx, *mock_journal, 0);
   expect_unblock_writes(mock_image_ctx);
 
index 2b1cefee405fba1024f2c546907116feda89ad60..0f3efac64c7fa1b9eef4e1f080c94d12594040a4 100644 (file)
@@ -5,6 +5,7 @@
 #define CEPH_TEST_LIBRBD_MOCK_AIO_IMAGE_REQUEST_WQ_H
 
 #include "gmock/gmock.h"
+#include "librbd/AioImageRequestWQ.h"
 
 class Context;
 
@@ -14,10 +15,8 @@ struct MockAioImageRequestWQ {
   MOCK_METHOD1(block_writes, void(Context *));
   MOCK_METHOD0(unblock_writes, void());
 
-  MOCK_METHOD0(set_require_lock_on_read, void());
-  MOCK_METHOD0(clear_require_lock_on_read, void());
+  MOCK_METHOD2(set_require_lock, void(AioDirection, bool));
 
-  MOCK_CONST_METHOD0(is_lock_required, bool());
   MOCK_CONST_METHOD0(is_lock_request_needed, bool());
 };
 
index df6a4ae35f11300ffce7bb055f53d5d80bd23b57..df74828c97788d0f616be451a85d5f95844f7b51 100644 (file)
@@ -106,24 +106,26 @@ public:
                   .WillRepeatedly(Return(watch_handle));
   }
 
-  void expect_set_require_lock_on_read(MockExclusiveLockImageCtx &mock_image_ctx) {
-    EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock_on_read());
-  }
-
-  void expect_clear_require_lock_on_read(MockExclusiveLockImageCtx &mock_image_ctx) {
-    EXPECT_CALL(*mock_image_ctx.aio_work_queue, clear_require_lock_on_read());
+  void expect_set_require_lock(MockExclusiveLockImageCtx &mock_image_ctx,
+                               AioDirection direction, bool enabled) {
+    EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock(direction,
+                                                                 enabled));
   }
 
   void expect_block_writes(MockExclusiveLockImageCtx &mock_image_ctx) {
+    if (mock_image_ctx.clone_copy_on_read ||
+        (mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
+      expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_BOTH, true);
+    } else {
+      expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_WRITE, true);
+    }
+
     EXPECT_CALL(*mock_image_ctx.aio_work_queue, block_writes(_))
                   .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));
-    if ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
-      expect_set_require_lock_on_read(mock_image_ctx);
-    }
   }
 
   void expect_unblock_writes(MockExclusiveLockImageCtx &mock_image_ctx) {
-    expect_clear_require_lock_on_read(mock_image_ctx);
+    expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_BOTH, false);
     EXPECT_CALL(*mock_image_ctx.aio_work_queue, unblock_writes());
   }