librbd: Propagate EBLOCKLISTED in send_acquire_lock
author    Christopher Hoffman <choffman@redhat.com>
          Tue, 21 Mar 2023 16:15:04 +0000 (16:15 +0000)
committer Christopher Hoffman <choffman@redhat.com>
          Thu, 30 Mar 2023 21:25:56 +0000 (21:25 +0000)
In send_acquire_lock(), there is a case where no watcher handle is
present and the lock request is delayed. If the client is blocklisted,
the delayed request will never continue and the call that requested the
lock will never complete.

The lock process now propagates -EBLOCKLISTED to the callback instead of
delaying indefinitely, as illustrated by the sketch below.

Fixes: https://tracker.ceph.com/issues/59115
Signed-off-by: Christopher Hoffman <choffman@redhat.com>
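
A minimal, self-contained sketch (not librbd code) of the control flow this
commit adds: the Watcher, State, and send_acquire_lock names below are
simplified stand-ins for the real librbd types, and the EBLOCKLISTED constant
is a local placeholder for Ceph's errno value.

    #include <cstdint>
    #include <functional>
    #include <iostream>

    // Stand-in for Ceph's -EBLOCKLISTED errno (value is illustrative only).
    constexpr int EBLOCKLISTED = 108;

    struct Watcher {                     // stand-in for the librbd watcher
      uint64_t watch_handle = 0;
      bool blocklisted = false;
      uint64_t get_watch_handle() const { return watch_handle; }
      bool is_blocklisted() const { return blocklisted; }
    };

    enum class State { UNLOCKED, WAITING_FOR_REGISTER, ACQUIRING };

    // Mirrors (in simplified form) the branch added to
    // ManagedLock<I>::send_acquire_lock().
    void send_acquire_lock(Watcher &w, State &state,
                           const std::function<void(int)> &on_finish) {
      if (w.get_watch_handle() == 0) {
        if (w.is_blocklisted()) {
          // New behavior: fail fast so the caller's callback always completes.
          state = State::UNLOCKED;
          on_finish(-EBLOCKLISTED);
        } else {
          // Previous behavior retained: wait for the watch to be (re)registered.
          state = State::WAITING_FOR_REGISTER;
        }
        return;
      }
      state = State::ACQUIRING;
      on_finish(0);  // actual acquire path elided
    }

    int main() {
      Watcher w;
      w.blocklisted = true;        // no watch handle and client blocklisted
      State state = State::UNLOCKED;
      send_acquire_lock(w, state, [](int r) {
        std::cout << "acquire completed with r=" << r << std::endl;  // -108
      });
      return 0;
    }

With a blocklisted watcher and no watch handle, the callback fires immediately
with -EBLOCKLISTED rather than parking the request in WAITING_FOR_REGISTER.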
src/librbd/ManagedLock.cc
src/test/librbd/test_mock_ManagedLock.cc

diff --git a/src/librbd/ManagedLock.cc b/src/librbd/ManagedLock.cc
index 53a0cf911ec8207dd419315417528d2f6949ed14..7b785a261a399a62b22d93ae6a934b5f30f5b207 100644
--- a/src/librbd/ManagedLock.cc
+++ b/src/librbd/ManagedLock.cc
@@ -481,12 +481,17 @@ void ManagedLock<I>::send_acquire_lock() {
 
   uint64_t watch_handle = m_watcher->get_watch_handle();
   if (watch_handle == 0) {
-    lderr(m_cct) << "watcher not registered - delaying request" << dendl;
-    m_state = STATE_WAITING_FOR_REGISTER;
+    if (m_watcher->is_blocklisted()) {
+      lderr(m_cct) << "watcher not registered - client blocklisted" << dendl;
+      complete_active_action(STATE_UNLOCKED, -EBLOCKLISTED);
+    } else {
+      lderr(m_cct) << "watcher not registered - delaying request" << dendl;
+      m_state = STATE_WAITING_FOR_REGISTER;
 
-    // shut down might race w/ release/re-acquire of the lock
-    if (is_state_shutdown()) {
-      complete_active_action(STATE_UNLOCKED, -ESHUTDOWN);
+      // shut down might race w/ release/re-acquire of the lock
+      if (is_state_shutdown()) {
+        complete_active_action(STATE_UNLOCKED, -ESHUTDOWN);
+      }
     }
     return;
   }
diff --git a/src/test/librbd/test_mock_ManagedLock.cc b/src/test/librbd/test_mock_ManagedLock.cc
index 800a8ee7ccb8b326157dfb88d31addf8955378af..dfa0f84fe101f3d970028ac29245feb9c8b62305 100644
--- a/src/test/librbd/test_mock_ManagedLock.cc
+++ b/src/test/librbd/test_mock_ManagedLock.cc
@@ -194,6 +194,11 @@ public:
                   .WillOnce(QueueRequest(&acquire_request, r, work_queue));
   }
 
+  void expect_is_blocklisted(MockImageWatcher &watcher,
+                             bool blocklisted) {
+    EXPECT_CALL(watcher, is_blocklisted()).WillOnce(Return(blocklisted));
+  }
+
   void expect_release_lock(asio::ContextWQ *work_queue,
                            MockReleaseRequest &release_request, int r) {
     EXPECT_CALL(release_request, send())
@@ -393,6 +398,26 @@ TEST_F(TestMockManagedLock, AcquireLockBlocklist) {
   ASSERT_EQ(0, when_shut_down(managed_lock));
 }
 
+TEST_F(TestMockManagedLock, AcquireLockBlocklistedWatch) {
+  librbd::ImageCtx *ictx;
+  ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+  MockManagedLockImageCtx mock_image_ctx(*ictx);
+  MockManagedLock managed_lock(ictx->md_ctx, *ictx->asio_engine,
+                               ictx->header_oid, mock_image_ctx.image_watcher,
+                               librbd::managed_lock::EXCLUSIVE, true, 0);
+
+  InSequence seq;
+
+  expect_get_watch_handle(*mock_image_ctx.image_watcher, 0);
+  expect_is_blocklisted(*mock_image_ctx.image_watcher, true);
+
+  ASSERT_EQ(-EBLOCKLISTED, when_acquire_lock(managed_lock));
+  ASSERT_FALSE(is_lock_owner(managed_lock));
+
+  ASSERT_EQ(0, when_shut_down(managed_lock));
+}
+
 TEST_F(TestMockManagedLock, ReleaseLockUnlockedState) {
   librbd::ImageCtx *ictx;
   ASSERT_EQ(0, open_image(m_image_name, &ictx));
@@ -563,6 +588,7 @@ TEST_F(TestMockManagedLock, AttemptReacquireBlocklistedLock) {
   expect_release_lock(ictx->op_work_queue, request_release, 0);
 
   expect_get_watch_handle(*mock_image_ctx.image_watcher, 0);
+  expect_is_blocklisted(*mock_image_ctx.image_watcher, false);
 
   managed_lock.reacquire_lock(nullptr);
 
@@ -684,6 +710,7 @@ TEST_F(TestMockManagedLock, ShutDownWhileWaiting) {
   InSequence seq;
 
   expect_get_watch_handle(*mock_image_ctx.image_watcher, 0);
+  expect_is_blocklisted(*mock_image_ctx.image_watcher, false);
 
   C_SaferCond acquire_ctx;
   managed_lock.acquire_lock(&acquire_ctx);