librbd: directly inform IO work queue when locks are required
author    Jason Dillaman <dillaman@redhat.com>
          Thu, 22 Jun 2017 13:10:20 +0000 (09:10 -0400)
committer Jason Dillaman <dillaman@redhat.com>
          Tue, 27 Jun 2017 17:56:09 +0000 (13:56 -0400)
Due to lock dependency issues between librbd locks and the thread
pool lock, it is impossible to determine directly within the
_void_dequeue method whether the exclusive lock is required.
Therefore, this state needs to be tracked internally by the work queue.

Signed-off-by: Jason Dillaman <dillaman@redhat.com>
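
A minimal, self-contained sketch of the pattern (illustrative names only,
not the librbd code): the work queue records the required-lock state per
I/O direction itself, and the dequeue path consults only that internal
state, so no librbd lock ever has to be taken while the thread pool lock
is held.

#include <deque>
#include <iostream>
#include <mutex>

// Sketch only -- all class, member, and function names are illustrative.
struct FakeRequest {
  bool write_op;
};

class SketchRequestWQ {
public:
  enum Direction { DIRECTION_READ, DIRECTION_WRITE, DIRECTION_BOTH };

  void set_require_lock(Direction direction, bool enabled) {
    std::lock_guard<std::mutex> locker(m_lock);
    if (direction == DIRECTION_READ || direction == DIRECTION_BOTH) {
      m_require_lock_on_read = enabled;
    }
    if (direction == DIRECTION_WRITE || direction == DIRECTION_BOTH) {
      m_require_lock_on_write = enabled;
    }
    // the real queue signals the thread pool here whenever the state
    // changes so that queued requests are re-examined
  }

  void queue(const FakeRequest &req) {
    std::lock_guard<std::mutex> locker(m_lock);
    m_items.push_back(req);
  }

  // analogue of _void_dequeue(): the decision is made purely from the
  // internally tracked flags
  bool try_dequeue(FakeRequest *out) {
    std::lock_guard<std::mutex> locker(m_lock);
    if (m_items.empty()) {
      return false;
    }
    const FakeRequest &item = m_items.front();
    bool lock_required = item.write_op ? m_require_lock_on_write
                                       : m_require_lock_on_read;
    if (lock_required) {
      return false;  // leave it queued until the exclusive lock is acquired
    }
    *out = item;
    m_items.pop_front();
    return true;
  }

private:
  std::mutex m_lock;
  std::deque<FakeRequest> m_items;
  bool m_require_lock_on_read = false;
  bool m_require_lock_on_write = false;
};

int main() {
  SketchRequestWQ wq;
  wq.set_require_lock(SketchRequestWQ::DIRECTION_WRITE, true);
  wq.queue(FakeRequest{true});

  FakeRequest req;
  std::cout << wq.try_dequeue(&req) << std::endl;  // 0: write still blocked
  wq.set_require_lock(SketchRequestWQ::DIRECTION_BOTH, false);
  std::cout << wq.try_dequeue(&req) << std::endl;  // 1: lock no longer required
  return 0;
}
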
src/librbd/ExclusiveLock.cc
src/librbd/exclusive_lock/PreReleaseRequest.cc
src/librbd/image/RefreshRequest.cc
src/librbd/io/ImageRequestWQ.cc
src/librbd/io/ImageRequestWQ.h
src/librbd/io/Types.h
src/test/librbd/exclusive_lock/test_mock_PreReleaseRequest.cc
src/test/librbd/image/test_mock_RefreshRequest.cc
src/test/librbd/mock/io/MockImageRequestWQ.h
src/test/librbd/test_mock_ExclusiveLock.cc

diff --git a/src/librbd/ExclusiveLock.cc b/src/librbd/ExclusiveLock.cc
index a3d69fce89fbed9b7b48a1f5f910f5d6bf4c5c31..f126ccbb482ab655eb406618f24faaa31c9e6a45 100644
@@ -147,8 +147,14 @@ template <typename I>
 void ExclusiveLock<I>::handle_init_complete(uint64_t features) {
   ldout(m_image_ctx.cct, 10) << ": features=" << features << dendl;
 
-  if ((features & RBD_FEATURE_JOURNALING) != 0) {
-    m_image_ctx.io_work_queue->set_require_lock_on_read();
+  {
+    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+    if (m_image_ctx.clone_copy_on_read ||
+        (features & RBD_FEATURE_JOURNALING) != 0) {
+      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, true);
+    } else {
+      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_WRITE, true);
+    }
   }
 
   Mutex::Locker locker(ML<I>::m_lock);
@@ -161,7 +167,7 @@ void ExclusiveLock<I>::shutdown_handler(int r, Context *on_finish) {
 
   {
     RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
-    m_image_ctx.io_work_queue->clear_require_lock_on_read();
+    m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
     m_image_ctx.exclusive_lock = nullptr;
   }
 
@@ -269,7 +275,7 @@ void ExclusiveLock<I>::handle_post_acquired_lock(int r) {
 
   if (r >= 0) {
     m_image_ctx.image_watcher->notify_acquired_lock();
-    m_image_ctx.io_work_queue->clear_require_lock_on_read();
+    m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
     m_image_ctx.io_work_queue->unblock_writes();
   }
 
@@ -311,7 +317,7 @@ void ExclusiveLock<I>::post_release_lock_handler(bool shutting_down, int r,
   } else {
     {
       RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
-      m_image_ctx.io_work_queue->clear_require_lock_on_read();
+      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
       m_image_ctx.exclusive_lock = nullptr;
     }
 
diff --git a/src/librbd/exclusive_lock/PreReleaseRequest.cc b/src/librbd/exclusive_lock/PreReleaseRequest.cc
index 5a37acc96a53301a06e39c4a97e19d18a38166a7..1fd6e2bfd22a8acc65ad2abc1254f750696b8b09 100644
@@ -109,8 +109,11 @@ void PreReleaseRequest<I>::send_block_writes() {
 
   {
     RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-    if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
-      m_image_ctx.io_work_queue->set_require_lock_on_read();
+    if (m_image_ctx.clone_copy_on_read ||
+        m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
+      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, true);
+    } else {
+      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_WRITE, true);
     }
     m_image_ctx.io_work_queue->block_writes(ctx);
   }
diff --git a/src/librbd/image/RefreshRequest.cc b/src/librbd/image/RefreshRequest.cc
index 7397b681a7164557ecd4463f5bb1398065b8194f..f8448dc0e015911d36269fa25c1a6551d788b217 100644
@@ -644,7 +644,8 @@ void RefreshRequest<I>::send_v2_open_journal() {
         !journal_disabled_by_policy &&
         m_image_ctx.exclusive_lock != nullptr &&
         m_image_ctx.journal == nullptr) {
-      m_image_ctx.io_work_queue->set_require_lock_on_read();
+      m_image_ctx.io_work_queue->set_require_lock(librbd::io::DIRECTION_BOTH,
+                                                  true);
     }
     send_v2_block_writes();
     return;
@@ -1077,7 +1078,6 @@ void RefreshRequest<I>::apply() {
       // object map and journaling
       assert(m_exclusive_lock == nullptr);
       m_exclusive_lock = m_image_ctx.exclusive_lock;
-      m_image_ctx.io_work_queue->clear_require_lock_on_read();
     } else {
       if (m_exclusive_lock != nullptr) {
         assert(m_image_ctx.exclusive_lock == nullptr);
@@ -1085,8 +1085,9 @@ void RefreshRequest<I>::apply() {
       }
       if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
                                      m_image_ctx.snap_lock)) {
-        if (m_image_ctx.journal != nullptr) {
-          m_image_ctx.io_work_queue->clear_require_lock_on_read();
+        if (!m_image_ctx.clone_copy_on_read && m_image_ctx.journal != nullptr) {
+          m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_READ,
+                                                      false);
         }
         std::swap(m_journal, m_image_ctx.journal);
       } else if (m_journal != nullptr) {
@@ -1097,10 +1098,6 @@ void RefreshRequest<I>::apply() {
           m_object_map != nullptr) {
         std::swap(m_object_map, m_image_ctx.object_map);
       }
-      if (m_image_ctx.clone_copy_on_read &&
-          m_image_ctx.io_work_queue->is_lock_required()) {
-        m_image_ctx.io_work_queue->set_require_lock_on_read();
-      }
     }
   }
 }
diff --git a/src/librbd/io/ImageRequestWQ.cc b/src/librbd/io/ImageRequestWQ.cc
index d80606af515a914b868a5d517f5b23d47a6cadce..577aa3801fee337ad14b02a34e7874af4c5960d2 100644
@@ -392,28 +392,36 @@ void ImageRequestWQ<I>::unblock_writes() {
 }
 
 template <typename I>
-void ImageRequestWQ<I>::set_require_lock_on_read() {
-  CephContext *cct = m_image_ctx.cct;
-  ldout(cct, 20) << dendl;
-
-  RWLock::WLocker locker(m_lock);
-  m_require_lock_on_read = true;
-}
-
-template <typename I>
-void ImageRequestWQ<I>::clear_require_lock_on_read() {
+void ImageRequestWQ<I>::set_require_lock(Direction direction, bool enabled) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << dendl;
 
+  bool wake_up = false;
   {
     RWLock::WLocker locker(m_lock);
-    if (!m_require_lock_on_read) {
-      return;
+    switch (direction) {
+    case DIRECTION_READ:
+      wake_up = (enabled != m_require_lock_on_read);
+      m_require_lock_on_read = enabled;
+      break;
+    case DIRECTION_WRITE:
+      wake_up = (enabled != m_require_lock_on_write);
+      m_require_lock_on_write = enabled;
+      break;
+    case DIRECTION_BOTH:
+      wake_up = (enabled != m_require_lock_on_read ||
+                 enabled != m_require_lock_on_write);
+      m_require_lock_on_read = enabled;
+      m_require_lock_on_write = enabled;
+      break;
     }
+  }
 
-    m_require_lock_on_read = false;
+  // wake up the thread pool whenever the state changes so that
+  // we can re-request the lock if required
+  if (wake_up) {
+    this->signal();
   }
-  this->signal();
 }
 
 template <typename I>
diff --git a/src/librbd/io/ImageRequestWQ.h b/src/librbd/io/ImageRequestWQ.h
index 6abfbdd7d99a861a6971a681d2ab9b3f9c7e6f1d..b3a91c3010bf19102d5d867bc49d973ff4c36145 100644
@@ -7,6 +7,7 @@
 #include "include/Context.h"
 #include "common/RWLock.h"
 #include "common/WorkQueue.h"
+#include "librbd/io/Types.h"
 
 #include <list>
 #include <atomic>
@@ -62,8 +63,7 @@ public:
   void block_writes(Context *on_blocked);
   void unblock_writes();
 
-  void set_require_lock_on_read();
-  void clear_require_lock_on_read();
+  void set_require_lock(Direction direction, bool enabled);
 
 protected:
   void *_void_dequeue() override;
@@ -101,6 +101,7 @@ private:
   Contexts m_write_blocker_contexts;
   uint32_t m_write_blockers = 0;
   bool m_require_lock_on_read = false;
+  bool m_require_lock_on_write = false;
   std::atomic<unsigned> m_queued_reads { 0 };
   std::atomic<unsigned> m_queued_writes { 0 };
   std::atomic<unsigned> m_in_flight_ios { 0 };
diff --git a/src/librbd/io/Types.h b/src/librbd/io/Types.h
index 874e1dd3e28eabd145a825ecd409d539ce2d0538..006b222984230fe6229c8ca92a83117dae64b85c 100644
@@ -23,6 +23,12 @@ typedef enum {
   AIO_TYPE_WRITESAME,
 } aio_type_t;
 
+enum Direction {
+  DIRECTION_READ,
+  DIRECTION_WRITE,
+  DIRECTION_BOTH
+};
+
 typedef std::vector<std::pair<uint64_t, uint64_t> > Extents;
 typedef std::map<uint64_t, uint64_t> ExtentMap;
 
diff --git a/src/test/librbd/exclusive_lock/test_mock_PreReleaseRequest.cc b/src/test/librbd/exclusive_lock/test_mock_PreReleaseRequest.cc
index 99de2551212a501990a6ecc8a3943bcd3cea5fac..44aa012568d12d935408ad6398146e2e806cb153 100644
@@ -53,15 +53,21 @@ public:
                   .WillOnce(Return(enabled));
   }
 
-  void expect_set_require_lock_on_read(MockImageCtx &mock_image_ctx) {
-    EXPECT_CALL(*mock_image_ctx.io_work_queue, set_require_lock_on_read());
+  void expect_set_require_lock(MockImageCtx &mock_image_ctx,
+                               librbd::io::Direction direction, bool enabled) {
+    EXPECT_CALL(*mock_image_ctx.io_work_queue, set_require_lock(direction,
+                                                                enabled));
   }
 
   void expect_block_writes(MockImageCtx &mock_image_ctx, int r) {
     expect_test_features(mock_image_ctx, RBD_FEATURE_JOURNALING,
                          ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0));
-    if ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
-      expect_set_require_lock_on_read(mock_image_ctx);
+    if (mock_image_ctx.clone_copy_on_read ||
+        (mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
+      expect_set_require_lock(mock_image_ctx, librbd::io::DIRECTION_BOTH, true);
+    } else {
+      expect_set_require_lock(mock_image_ctx, librbd::io::DIRECTION_WRITE,
+                              true);
     }
     EXPECT_CALL(*mock_image_ctx.io_work_queue, block_writes(_))
                   .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
diff --git a/src/test/librbd/image/test_mock_RefreshRequest.cc b/src/test/librbd/image/test_mock_RefreshRequest.cc
index 858e98e197cdf1841b70360a80920d2ca62d7dc4..5c50d8131a691e7dc0b20664d3d9fbef652a86d0 100644
@@ -97,16 +97,10 @@ public:
   typedef RefreshRequest<MockRefreshImageCtx> MockRefreshRequest;
   typedef RefreshParentRequest<MockRefreshImageCtx> MockRefreshParentRequest;
 
-  void expect_is_lock_required(MockRefreshImageCtx &mock_image_ctx, bool require_lock) {
-    EXPECT_CALL(*mock_image_ctx.io_work_queue, is_lock_required()).WillOnce(Return(require_lock));
-  }
-
-  void expect_set_require_lock_on_read(MockRefreshImageCtx &mock_image_ctx) {
-    EXPECT_CALL(*mock_image_ctx.io_work_queue, set_require_lock_on_read());
-  }
-
-  void expect_clear_require_lock_on_read(MockRefreshImageCtx &mock_image_ctx) {
-    EXPECT_CALL(*mock_image_ctx.io_work_queue, clear_require_lock_on_read());
+  void expect_set_require_lock(MockRefreshImageCtx &mock_image_ctx,
+                               librbd::io::Direction direction, bool enabled) {
+    EXPECT_CALL(*mock_image_ctx.io_work_queue, set_require_lock(direction,
+                                                                enabled));
   }
 
   void expect_v1_read_header(MockRefreshImageCtx &mock_image_ctx, int r) {
@@ -637,7 +631,6 @@ TEST_F(TestMockImageRefreshRequest, DisableExclusiveLock) {
   expect_get_flags(mock_image_ctx, 0);
   expect_get_group(mock_image_ctx, 0);
   expect_refresh_parent_is_required(mock_refresh_parent_request, false);
-  expect_clear_require_lock_on_read(mock_image_ctx);
   expect_shut_down_exclusive_lock(mock_image_ctx, *mock_exclusive_lock, 0);
 
   C_SaferCond ctx;
@@ -742,58 +735,6 @@ TEST_F(TestMockImageRefreshRequest, JournalDisabledByPolicy) {
   ASSERT_EQ(0, ctx.wait());
 }
 
-TEST_F(TestMockImageRefreshRequest, ExclusiveLockWithCoR) {
-  REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
-
-  std::string val;
-  ASSERT_EQ(0, _rados.conf_get("rbd_clone_copy_on_read", val));
-  if (val == "false") {
-    std::cout << "SKIPPING due to disabled rbd_copy_on_read" << std::endl;
-    return;
-  }
-
-  librbd::ImageCtx *ictx;
-  ASSERT_EQ(0, open_image(m_image_name, &ictx));
-
-  MockRefreshImageCtx mock_image_ctx(*ictx);
-  MockRefreshParentRequest mock_refresh_parent_request;
-
-  MockExclusiveLock mock_exclusive_lock;
-  mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
-
-  if (ictx->test_features(RBD_FEATURE_JOURNALING)) {
-    ASSERT_EQ(0, ictx->operations->update_features(RBD_FEATURE_JOURNALING,
-                                                   false));
-  }
-
-  if (ictx->test_features(RBD_FEATURE_FAST_DIFF)) {
-    ASSERT_EQ(0, ictx->operations->update_features(RBD_FEATURE_FAST_DIFF,
-                                                   false));
-  }
-
-  if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
-    ASSERT_EQ(0, ictx->operations->update_features(RBD_FEATURE_OBJECT_MAP,
-                                                   false));
-  }
-
-  expect_op_work_queue(mock_image_ctx);
-  expect_test_features(mock_image_ctx);
-
-  InSequence seq;
-  expect_get_mutable_metadata(mock_image_ctx, 0);
-  expect_get_flags(mock_image_ctx, 0);
-  expect_get_group(mock_image_ctx, 0);
-  expect_refresh_parent_is_required(mock_refresh_parent_request, false);
-  expect_is_lock_required(mock_image_ctx, true);
-  expect_set_require_lock_on_read(mock_image_ctx);
-
-  C_SaferCond ctx;
-  MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
-  req->send();
-
-  ASSERT_EQ(0, ctx.wait());
-}
-
 TEST_F(TestMockImageRefreshRequest, EnableJournalWithExclusiveLock) {
   REQUIRE_FEATURE(RBD_FEATURE_JOURNALING);
 
@@ -873,7 +814,7 @@ TEST_F(TestMockImageRefreshRequest, EnableJournalWithoutExclusiveLock) {
   expect_get_flags(mock_image_ctx, 0);
   expect_get_group(mock_image_ctx, 0);
   expect_refresh_parent_is_required(mock_refresh_parent_request, false);
-  expect_set_require_lock_on_read(mock_image_ctx);
+  expect_set_require_lock(mock_image_ctx, librbd::io::DIRECTION_BOTH, true);
 
   C_SaferCond ctx;
   MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
@@ -917,7 +858,9 @@ TEST_F(TestMockImageRefreshRequest, DisableJournal) {
   expect_get_group(mock_image_ctx, 0);
   expect_refresh_parent_is_required(mock_refresh_parent_request, false);
   expect_block_writes(mock_image_ctx, 0);
-  expect_clear_require_lock_on_read(mock_image_ctx);
+  if (!mock_image_ctx.clone_copy_on_read) {
+    expect_set_require_lock(mock_image_ctx, librbd::io::DIRECTION_READ, false);
+  }
   expect_close_journal(mock_image_ctx, *mock_journal, 0);
   expect_unblock_writes(mock_image_ctx);
 
diff --git a/src/test/librbd/mock/io/MockImageRequestWQ.h b/src/test/librbd/mock/io/MockImageRequestWQ.h
index 049d1bc233e0f4313529ce4f9cf2298a9cd9918a..c3187e086cf42f1d958a9228b1e42ecd25ace957 100644
@@ -5,6 +5,7 @@
 #define CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_REQUEST_WQ_H
 
 #include "gmock/gmock.h"
+#include "librbd/io/Types.h"
 
 class Context;
 
@@ -15,10 +16,8 @@ struct MockImageRequestWQ {
   MOCK_METHOD1(block_writes, void(Context *));
   MOCK_METHOD0(unblock_writes, void());
 
-  MOCK_METHOD0(set_require_lock_on_read, void());
-  MOCK_METHOD0(clear_require_lock_on_read, void());
+  MOCK_METHOD2(set_require_lock, void(Direction, bool));
 
-  MOCK_CONST_METHOD0(is_lock_required, bool());
   MOCK_CONST_METHOD0(is_lock_request_needed, bool());
 };
 
diff --git a/src/test/librbd/test_mock_ExclusiveLock.cc b/src/test/librbd/test_mock_ExclusiveLock.cc
index 2a3388bdb39a04bc94b63113161a7de78cbd56d8..6496af5089be498080a3ff1891903a1bfd202a42 100644
@@ -214,24 +214,25 @@ public:
       .WillOnce(Return(ret_val));
   }
 
-  void expect_set_require_lock_on_read(MockExclusiveLockImageCtx &mock_image_ctx) {
-    EXPECT_CALL(*mock_image_ctx.io_work_queue, set_require_lock_on_read());
-  }
-
-  void expect_clear_require_lock_on_read(MockExclusiveLockImageCtx &mock_image_ctx) {
-    EXPECT_CALL(*mock_image_ctx.io_work_queue, clear_require_lock_on_read());
+  void expect_set_require_lock(MockExclusiveLockImageCtx &mock_image_ctx,
+                               io::Direction direction, bool enabled) {
+    EXPECT_CALL(*mock_image_ctx.io_work_queue, set_require_lock(direction,
+                                                                enabled));
   }
 
   void expect_block_writes(MockExclusiveLockImageCtx &mock_image_ctx) {
     EXPECT_CALL(*mock_image_ctx.io_work_queue, block_writes(_))
                   .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));
-    if ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
-      expect_set_require_lock_on_read(mock_image_ctx);
+    if (mock_image_ctx.clone_copy_on_read ||
+        (mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
+      expect_set_require_lock(mock_image_ctx, io::DIRECTION_BOTH, true);
+    } else {
+      expect_set_require_lock(mock_image_ctx, io::DIRECTION_WRITE, true);
     }
   }
 
   void expect_unblock_writes(MockExclusiveLockImageCtx &mock_image_ctx) {
-    expect_clear_require_lock_on_read(mock_image_ctx);
+    expect_set_require_lock(mock_image_ctx, io::DIRECTION_BOTH, false);
     EXPECT_CALL(*mock_image_ctx.io_work_queue, unblock_writes());
   }