From 8fb8eb61dd8b2e44cb10da9b28fa334267784e2f Mon Sep 17 00:00:00 2001 From: Jason Dillaman Date: Wed, 30 Aug 2017 22:08:15 -0400 Subject: [PATCH] librbd: directly inform IO work queue when locks are required Due to lock dependency issues between librbd locks and the thread pool lock, it's impossible to directly determine if the lock is required within the _void_dequeue method. Therefore, this state needs to be internally tracked by the work queue. (derived from commit 4a525671b3541a0a208dd039ac96f42bc8fca2cc) Signed-off-by: Jason Dillaman --- src/librbd/AioImageRequestWQ.cc | 40 ++++++++----- src/librbd/AioImageRequestWQ.h | 10 +++- src/librbd/ExclusiveLock.cc | 15 +++-- src/librbd/exclusive_lock/ReleaseRequest.cc | 7 ++- src/librbd/image/RefreshRequest.cc | 11 +--- .../test_mock_ReleaseRequest.cc | 13 ++-- .../librbd/image/test_mock_RefreshRequest.cc | 59 +++---------------- src/test/librbd/mock/MockAioImageRequestWQ.h | 5 +- src/test/librbd/test_mock_ExclusiveLock.cc | 22 +++---- 9 files changed, 80 insertions(+), 102 deletions(-) diff --git a/src/librbd/AioImageRequestWQ.cc b/src/librbd/AioImageRequestWQ.cc index 3704869d0f093..93bbe6ba37dc4 100644 --- a/src/librbd/AioImageRequestWQ.cc +++ b/src/librbd/AioImageRequestWQ.cc @@ -292,28 +292,36 @@ void AioImageRequestWQ::unblock_writes() { } template -void AioImageRequestWQ::set_require_lock_on_read() { +void AioImageRequestWQ::set_require_lock(AioDirection aio_direction, + bool enabled) { CephContext *cct = m_image_ctx.cct; - ldout(cct, 20) << __func__ << dendl; - - RWLock::WLocker locker(m_lock); - m_require_lock_on_read = true; -} - -template -void AioImageRequestWQ::clear_require_lock_on_read() { - CephContext *cct = m_image_ctx.cct; - ldout(cct, 20) << __func__ << dendl; + ldout(cct, 20) << dendl; + bool wake_up = false; { - RWLock::WLocker locker(m_lock); - if (!m_require_lock_on_read) { - return; + switch (aio_direction) { + case AIO_DIRECTION_READ: + wake_up = (enabled != m_require_lock_on_read); + 
m_require_lock_on_read = enabled; + break; + case AIO_DIRECTION_WRITE: + wake_up = (enabled != m_require_lock_on_write); + m_require_lock_on_write = enabled; + break; + case AIO_DIRECTION_BOTH: + wake_up = (enabled != m_require_lock_on_read || + enabled != m_require_lock_on_write); + m_require_lock_on_read = enabled; + m_require_lock_on_write = enabled; + break; } + } - m_require_lock_on_read = false; + // wake up the thread pool whenever the state changes so that + // we can re-request the lock if required + if (wake_up) { + this->signal(); } - this->signal(); } template diff --git a/src/librbd/AioImageRequestWQ.h b/src/librbd/AioImageRequestWQ.h index 0b7481c202eb7..c21a23de52bf6 100644 --- a/src/librbd/AioImageRequestWQ.h +++ b/src/librbd/AioImageRequestWQ.h @@ -16,6 +16,12 @@ class AioCompletion; template class AioImageRequest; class ImageCtx; +enum AioDirection { + AIO_DIRECTION_READ, + AIO_DIRECTION_WRITE, + AIO_DIRECTION_BOTH +}; + template class AioImageRequestWQ : protected ThreadPool::PointerWQ > { @@ -52,8 +58,7 @@ public: void block_writes(Context *on_blocked); void unblock_writes(); - void set_require_lock_on_read(); - void clear_require_lock_on_read(); + void set_require_lock(AioDirection aio_direction, bool enabled); protected: virtual void *_void_dequeue(); @@ -91,6 +96,7 @@ private: Contexts m_write_blocker_contexts; uint32_t m_write_blockers = 0; bool m_require_lock_on_read = false; + bool m_require_lock_on_write = false; atomic_t m_in_flight_writes {0}; atomic_t m_queued_reads {0}; atomic_t m_queued_writes {0}; diff --git a/src/librbd/ExclusiveLock.cc b/src/librbd/ExclusiveLock.cc index 43ed31b6c295e..50cd61eb6d708 100644 --- a/src/librbd/ExclusiveLock.cc +++ b/src/librbd/ExclusiveLock.cc @@ -121,10 +121,13 @@ void ExclusiveLock::init(uint64_t features, Context *on_init) { m_state = STATE_INITIALIZING; } - m_image_ctx.aio_work_queue->block_writes(new C_InitComplete(this, on_init)); - if ((features & RBD_FEATURE_JOURNALING) != 0) { - 
m_image_ctx.aio_work_queue->set_require_lock_on_read(); + if (m_image_ctx.clone_copy_on_read || + (features & RBD_FEATURE_JOURNALING) != 0) { + m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, true); + } else { + m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_WRITE, true); } + m_image_ctx.aio_work_queue->block_writes(new C_InitComplete(this, on_init)); } template @@ -513,7 +516,7 @@ void ExclusiveLock::handle_acquire_lock(int r) { if (next_state == STATE_LOCKED) { m_image_ctx.image_watcher->notify_acquired_lock(); - m_image_ctx.aio_work_queue->clear_require_lock_on_read(); + m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, false); m_image_ctx.aio_work_queue->unblock_writes(); } @@ -732,7 +735,7 @@ void ExclusiveLock::handle_shutdown_released(int r) { lderr(cct) << "failed to shut down exclusive lock: " << cpp_strerror(r) << dendl; } else { - m_image_ctx.aio_work_queue->clear_require_lock_on_read(); + m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, false); m_image_ctx.aio_work_queue->unblock_writes(); } @@ -750,7 +753,7 @@ void ExclusiveLock::handle_shutdown(int r) { m_image_ctx.exclusive_lock = nullptr; } - m_image_ctx.aio_work_queue->clear_require_lock_on_read(); + m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, false); m_image_ctx.aio_work_queue->unblock_writes(); m_image_ctx.image_watcher->flush(util::create_context_callback< ExclusiveLock, &ExclusiveLock::complete_shutdown>(this)); diff --git a/src/librbd/exclusive_lock/ReleaseRequest.cc b/src/librbd/exclusive_lock/ReleaseRequest.cc index fbe2854fbf491..552ca065816d7 100644 --- a/src/librbd/exclusive_lock/ReleaseRequest.cc +++ b/src/librbd/exclusive_lock/ReleaseRequest.cc @@ -116,8 +116,11 @@ void ReleaseRequest::send_block_writes() { { RWLock::RLocker owner_locker(m_image_ctx.owner_lock); - if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) { - m_image_ctx.aio_work_queue->set_require_lock_on_read(); + if 
(m_image_ctx.clone_copy_on_read || + m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) { + m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, true); + } else { + m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_WRITE, true); } m_image_ctx.aio_work_queue->block_writes(ctx); } diff --git a/src/librbd/image/RefreshRequest.cc b/src/librbd/image/RefreshRequest.cc index 3fcedeab33a90..ffaa5790f2377 100644 --- a/src/librbd/image/RefreshRequest.cc +++ b/src/librbd/image/RefreshRequest.cc @@ -499,7 +499,7 @@ void RefreshRequest::send_v2_open_journal() { !journal_disabled_by_policy && m_image_ctx.exclusive_lock != nullptr && m_image_ctx.journal == nullptr) { - m_image_ctx.aio_work_queue->set_require_lock_on_read(); + m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, true); } send_v2_block_writes(); return; @@ -929,7 +929,6 @@ void RefreshRequest::apply() { // object map and journaling assert(m_exclusive_lock == nullptr); m_exclusive_lock = m_image_ctx.exclusive_lock; - m_image_ctx.aio_work_queue->clear_require_lock_on_read(); } else { if (m_exclusive_lock != nullptr) { assert(m_image_ctx.exclusive_lock == nullptr); @@ -937,8 +936,8 @@ void RefreshRequest::apply() { } if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING, m_image_ctx.snap_lock)) { - if (m_image_ctx.journal != nullptr) { - m_image_ctx.aio_work_queue->clear_require_lock_on_read(); + if (!m_image_ctx.clone_copy_on_read && m_image_ctx.journal != nullptr) { + m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_READ, false); } std::swap(m_journal, m_image_ctx.journal); } else if (m_journal != nullptr) { @@ -949,10 +948,6 @@ void RefreshRequest::apply() { m_object_map != nullptr) { std::swap(m_object_map, m_image_ctx.object_map); } - if (m_image_ctx.clone_copy_on_read && - m_image_ctx.aio_work_queue->is_lock_required()) { - m_image_ctx.aio_work_queue->set_require_lock_on_read(); - } } } } diff --git a/src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc 
b/src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc index f15c2ff29b138..73bc8d4e85102 100644 --- a/src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc +++ b/src/test/librbd/exclusive_lock/test_mock_ReleaseRequest.cc @@ -51,15 +51,20 @@ public: .WillOnce(Return(enabled)); } - void expect_set_require_lock_on_read(MockImageCtx &mock_image_ctx) { - EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock_on_read()); + void expect_set_require_lock(MockImageCtx &mock_image_ctx, + AioDirection direction, bool enabled) { + EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock(direction, + enabled)); } void expect_block_writes(MockImageCtx &mock_image_ctx, int r) { expect_test_features(mock_image_ctx, RBD_FEATURE_JOURNALING, ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0)); - if ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) { - expect_set_require_lock_on_read(mock_image_ctx); + if (mock_image_ctx.clone_copy_on_read || + (mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) { + expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_BOTH, true); + } else { + expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_WRITE, true); } EXPECT_CALL(*mock_image_ctx.aio_work_queue, block_writes(_)) .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue)); diff --git a/src/test/librbd/image/test_mock_RefreshRequest.cc b/src/test/librbd/image/test_mock_RefreshRequest.cc index 2938a9fef923e..5f8b813ee10b8 100644 --- a/src/test/librbd/image/test_mock_RefreshRequest.cc +++ b/src/test/librbd/image/test_mock_RefreshRequest.cc @@ -97,16 +97,10 @@ public: typedef RefreshRequest MockRefreshRequest; typedef RefreshParentRequest MockRefreshParentRequest; - void expect_is_lock_required(MockRefreshImageCtx &mock_image_ctx, bool require_lock) { - EXPECT_CALL(*mock_image_ctx.aio_work_queue, is_lock_required()).WillOnce(Return(require_lock)); - } - - void expect_set_require_lock_on_read(MockRefreshImageCtx &mock_image_ctx) { - 
EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock_on_read()); - } - - void expect_clear_require_lock_on_read(MockRefreshImageCtx &mock_image_ctx) { - EXPECT_CALL(*mock_image_ctx.aio_work_queue, clear_require_lock_on_read()); + void expect_set_require_lock(MockRefreshImageCtx &mock_image_ctx, + AioDirection direction, bool enabled) { + EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock(direction, + enabled)); } void expect_v1_read_header(MockRefreshImageCtx &mock_image_ctx, int r) { @@ -590,7 +584,6 @@ TEST_F(TestMockImageRefreshRequest, DisableExclusiveLock) { expect_get_mutable_metadata(mock_image_ctx, 0); expect_get_flags(mock_image_ctx, 0); expect_refresh_parent_is_required(mock_refresh_parent_request, false); - expect_clear_require_lock_on_read(mock_image_ctx); expect_shut_down_exclusive_lock(mock_image_ctx, *mock_exclusive_lock, 0); C_SaferCond ctx; @@ -687,44 +680,6 @@ TEST_F(TestMockImageRefreshRequest, JournalDisabledByPolicy) { ASSERT_EQ(0, ctx.wait()); } -TEST_F(TestMockImageRefreshRequest, ExclusiveLockWithCoR) { - REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); - - REQUIRE(!is_feature_enabled(RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF | RBD_FEATURE_JOURNALING)) - - std::string val; - ASSERT_EQ(0, _rados.conf_get("rbd_clone_copy_on_read", val)); - if (val == "false") { - std::cout << "SKIPPING due to disabled rbd_copy_on_read" << std::endl; - return; - } - - librbd::ImageCtx *ictx; - ASSERT_EQ(0, open_image(m_image_name, &ictx)); - - MockRefreshImageCtx mock_image_ctx(*ictx); - MockRefreshParentRequest mock_refresh_parent_request; - - MockExclusiveLock mock_exclusive_lock; - mock_image_ctx.exclusive_lock = &mock_exclusive_lock; - - expect_op_work_queue(mock_image_ctx); - expect_test_features(mock_image_ctx); - - InSequence seq; - expect_get_mutable_metadata(mock_image_ctx, 0); - expect_get_flags(mock_image_ctx, 0); - expect_refresh_parent_is_required(mock_refresh_parent_request, false); - expect_is_lock_required(mock_image_ctx, 
true); - expect_set_require_lock_on_read(mock_image_ctx); - - C_SaferCond ctx; - MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx); - req->send(); - - ASSERT_EQ(0, ctx.wait()); -} - TEST_F(TestMockImageRefreshRequest, EnableJournalWithExclusiveLock) { REQUIRE_FEATURE(RBD_FEATURE_JOURNALING); @@ -798,7 +753,7 @@ TEST_F(TestMockImageRefreshRequest, EnableJournalWithoutExclusiveLock) { expect_get_mutable_metadata(mock_image_ctx, 0); expect_get_flags(mock_image_ctx, 0); expect_refresh_parent_is_required(mock_refresh_parent_request, false); - expect_set_require_lock_on_read(mock_image_ctx); + expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_BOTH, true); C_SaferCond ctx; MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx); @@ -840,7 +795,9 @@ TEST_F(TestMockImageRefreshRequest, DisableJournal) { expect_get_flags(mock_image_ctx, 0); expect_refresh_parent_is_required(mock_refresh_parent_request, false); expect_block_writes(mock_image_ctx, 0); - expect_clear_require_lock_on_read(mock_image_ctx); + if (!mock_image_ctx.clone_copy_on_read) { + expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_READ, false); + } expect_close_journal(mock_image_ctx, *mock_journal, 0); expect_unblock_writes(mock_image_ctx); diff --git a/src/test/librbd/mock/MockAioImageRequestWQ.h b/src/test/librbd/mock/MockAioImageRequestWQ.h index 2b1cefee405fb..0f3efac64c7fa 100644 --- a/src/test/librbd/mock/MockAioImageRequestWQ.h +++ b/src/test/librbd/mock/MockAioImageRequestWQ.h @@ -5,6 +5,7 @@ #define CEPH_TEST_LIBRBD_MOCK_AIO_IMAGE_REQUEST_WQ_H #include "gmock/gmock.h" +#include "librbd/AioImageRequestWQ.h" class Context; @@ -14,10 +15,8 @@ struct MockAioImageRequestWQ { MOCK_METHOD1(block_writes, void(Context *)); MOCK_METHOD0(unblock_writes, void()); - MOCK_METHOD0(set_require_lock_on_read, void()); - MOCK_METHOD0(clear_require_lock_on_read, void()); + MOCK_METHOD2(set_require_lock, void(AioDirection, bool)); - 
MOCK_CONST_METHOD0(is_lock_required, bool()); MOCK_CONST_METHOD0(is_lock_request_needed, bool()); }; diff --git a/src/test/librbd/test_mock_ExclusiveLock.cc b/src/test/librbd/test_mock_ExclusiveLock.cc index df6a4ae35f113..df74828c97788 100644 --- a/src/test/librbd/test_mock_ExclusiveLock.cc +++ b/src/test/librbd/test_mock_ExclusiveLock.cc @@ -106,24 +106,26 @@ public: .WillRepeatedly(Return(watch_handle)); } - void expect_set_require_lock_on_read(MockExclusiveLockImageCtx &mock_image_ctx) { - EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock_on_read()); - } - - void expect_clear_require_lock_on_read(MockExclusiveLockImageCtx &mock_image_ctx) { - EXPECT_CALL(*mock_image_ctx.aio_work_queue, clear_require_lock_on_read()); + void expect_set_require_lock(MockExclusiveLockImageCtx &mock_image_ctx, + AioDirection direction, bool enabled) { + EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock(direction, + enabled)); } void expect_block_writes(MockExclusiveLockImageCtx &mock_image_ctx) { + if (mock_image_ctx.clone_copy_on_read || + (mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) { + expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_BOTH, true); + } else { + expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_WRITE, true); + } + EXPECT_CALL(*mock_image_ctx.aio_work_queue, block_writes(_)) .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); - if ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) { - expect_set_require_lock_on_read(mock_image_ctx); - } } void expect_unblock_writes(MockExclusiveLockImageCtx &mock_image_ctx) { - expect_clear_require_lock_on_read(mock_image_ctx); + expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_BOTH, false); EXPECT_CALL(*mock_image_ctx.aio_work_queue, unblock_writes()); } -- 2.39.5