}
template <typename I>
-void AioImageRequestWQ<I>::set_require_lock_on_read() {
+void AioImageRequestWQ<I>::set_require_lock(AioDirection aio_direction,
+ bool enabled) {
CephContext *cct = m_image_ctx.cct;
- ldout(cct, 20) << __func__ << dendl;
-
- RWLock::WLocker locker(m_lock);
- m_require_lock_on_read = true;
-}
-
-template <typename I>
-void AioImageRequestWQ<I>::clear_require_lock_on_read() {
- CephContext *cct = m_image_ctx.cct;
- ldout(cct, 20) << __func__ << dendl;
+ ldout(cct, 20) << dendl;
+ bool wake_up = false;
{
    RWLock::WLocker locker(m_lock);
- if (!m_require_lock_on_read) {
- return;
+ switch (aio_direction) {
+ case AIO_DIRECTION_READ:
+ wake_up = (enabled != m_require_lock_on_read);
+ m_require_lock_on_read = enabled;
+ break;
+ case AIO_DIRECTION_WRITE:
+ wake_up = (enabled != m_require_lock_on_write);
+ m_require_lock_on_write = enabled;
+ break;
+ case AIO_DIRECTION_BOTH:
+ wake_up = (enabled != m_require_lock_on_read ||
+ enabled != m_require_lock_on_write);
+ m_require_lock_on_read = enabled;
+ m_require_lock_on_write = enabled;
+ break;
}
+ }
- m_require_lock_on_read = false;
+ // wake up the thread pool whenever the state changes so that
+ // we can re-request the lock if required
+ if (wake_up) {
+ this->signal();
}
- this->signal();
}
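
The wake-up is edge-triggered: the thread pool is only signalled when the tracked state actually changes, so redundant calls are cheap no-ops. A minimal usage sketch, assuming wq points at this work queue:

    wq->set_require_lock(AIO_DIRECTION_WRITE, true);   // state change: signals the pool
    wq->set_require_lock(AIO_DIRECTION_WRITE, true);   // no state change: no signal
    wq->set_require_lock(AIO_DIRECTION_BOTH, false);   // clears both flags: signals the pool
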
template <typename I>
template <typename> class AioImageRequest;
class ImageCtx;
+enum AioDirection {
+ AIO_DIRECTION_READ,
+ AIO_DIRECTION_WRITE,
+ AIO_DIRECTION_BOTH
+};
+
template <typename ImageCtxT = librbd::ImageCtx>
class AioImageRequestWQ
: protected ThreadPool::PointerWQ<AioImageRequest<ImageCtxT> > {
void block_writes(Context *on_blocked);
void unblock_writes();
- void set_require_lock_on_read();
- void clear_require_lock_on_read();
+ void set_require_lock(AioDirection aio_direction, bool enabled);
protected:
virtual void *_void_dequeue();
Contexts m_write_blocker_contexts;
uint32_t m_write_blockers = 0;
bool m_require_lock_on_read = false;
+ bool m_require_lock_on_write = false;
atomic_t m_in_flight_writes {0};
atomic_t m_queued_reads {0};
atomic_t m_queued_writes {0};
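
The split read/write flags let the dequeue path decide per-op whether an item must wait for the exclusive lock. The corresponding _void_dequeue() change is not part of this diff; a hedged sketch of the kind of check it would perform (helper name and signature are assumptions, not code from this change):

    // Hypothetical helper, assuming m_lock is already held by the caller.
    bool is_lock_required(bool write_op) const {
      return write_op ? m_require_lock_on_write : m_require_lock_on_read;
    }
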
m_state = STATE_INITIALIZING;
}
- m_image_ctx.aio_work_queue->block_writes(new C_InitComplete(this, on_init));
- if ((features & RBD_FEATURE_JOURNALING) != 0) {
- m_image_ctx.aio_work_queue->set_require_lock_on_read();
+ if (m_image_ctx.clone_copy_on_read ||
+ (features & RBD_FEATURE_JOURNALING) != 0) {
+ m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, true);
+ } else {
+ m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_WRITE, true);
}
+ m_image_ctx.aio_work_queue->block_writes(new C_InitComplete(this, on_init));
}
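
The copy-on-read/journaling predicate above reappears verbatim in the pre-release path and in both test fixtures below. For illustration only, a hypothetical helper capturing the rule (not part of this change):

    // Hypothetical helper: which direction must be lock-protected for a
    // given image configuration. Clone copy-on-read turns reads into
    // object writes, so it requires the lock in both directions, as does
    // journaling; otherwise only writes need the lock.
    static AioDirection require_lock_direction(bool clone_copy_on_read,
                                               uint64_t features) {
      return (clone_copy_on_read || (features & RBD_FEATURE_JOURNALING) != 0) ?
               AIO_DIRECTION_BOTH : AIO_DIRECTION_WRITE;
    }
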
template <typename I>
if (next_state == STATE_LOCKED) {
m_image_ctx.image_watcher->notify_acquired_lock();
- m_image_ctx.aio_work_queue->clear_require_lock_on_read();
+ m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, false);
m_image_ctx.aio_work_queue->unblock_writes();
}
lderr(cct) << "failed to shut down exclusive lock: " << cpp_strerror(r)
<< dendl;
} else {
- m_image_ctx.aio_work_queue->clear_require_lock_on_read();
+ m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, false);
m_image_ctx.aio_work_queue->unblock_writes();
}
m_image_ctx.exclusive_lock = nullptr;
}
- m_image_ctx.aio_work_queue->clear_require_lock_on_read();
+ m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, false);
m_image_ctx.aio_work_queue->unblock_writes();
m_image_ctx.image_watcher->flush(util::create_context_callback<
ExclusiveLock<I>, &ExclusiveLock<I>::complete_shutdown>(this));
{
RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
- m_image_ctx.aio_work_queue->set_require_lock_on_read();
+ if (m_image_ctx.clone_copy_on_read ||
+ m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
+ m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, true);
+ } else {
+ m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_WRITE, true);
}
m_image_ctx.aio_work_queue->block_writes(ctx);
}
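
Taken together with the acquired-lock hunk above, the choreography is symmetric. A hedged end-to-end sketch, with the surrounding state-machine transitions elided:

    // Before releasing the lock: queue the affected directions, then drain.
    m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, true);
    m_image_ctx.aio_work_queue->block_writes(ctx);
    // ... lock released; later re-acquired ...
    // After (re)acquiring the lock: allow direct dispatch again.
    m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, false);
    m_image_ctx.aio_work_queue->unblock_writes();
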
!journal_disabled_by_policy &&
m_image_ctx.exclusive_lock != nullptr &&
m_image_ctx.journal == nullptr) {
- m_image_ctx.aio_work_queue->set_require_lock_on_read();
+ m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_BOTH, true);
}
send_v2_block_writes();
return;
// object map and journaling
assert(m_exclusive_lock == nullptr);
m_exclusive_lock = m_image_ctx.exclusive_lock;
- m_image_ctx.aio_work_queue->clear_require_lock_on_read();
} else {
if (m_exclusive_lock != nullptr) {
assert(m_image_ctx.exclusive_lock == nullptr);
}
if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
m_image_ctx.snap_lock)) {
- if (m_image_ctx.journal != nullptr) {
- m_image_ctx.aio_work_queue->clear_require_lock_on_read();
+ if (!m_image_ctx.clone_copy_on_read && m_image_ctx.journal != nullptr) {
+ m_image_ctx.aio_work_queue->set_require_lock(AIO_DIRECTION_READ, false);
}
std::swap(m_journal, m_image_ctx.journal);
} else if (m_journal != nullptr) {
m_object_map != nullptr) {
std::swap(m_object_map, m_image_ctx.object_map);
}
- if (m_image_ctx.clone_copy_on_read &&
- m_image_ctx.aio_work_queue->is_lock_required()) {
- m_image_ctx.aio_work_queue->set_require_lock_on_read();
- }
}
}
}
.WillOnce(Return(enabled));
}
- void expect_set_require_lock_on_read(MockImageCtx &mock_image_ctx) {
- EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock_on_read());
+ void expect_set_require_lock(MockImageCtx &mock_image_ctx,
+ AioDirection direction, bool enabled) {
+ EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock(direction,
+ enabled));
}
void expect_block_writes(MockImageCtx &mock_image_ctx, int r) {
expect_test_features(mock_image_ctx, RBD_FEATURE_JOURNALING,
((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0));
- if ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
- expect_set_require_lock_on_read(mock_image_ctx);
+ if (mock_image_ctx.clone_copy_on_read ||
+ (mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
+ expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_BOTH, true);
+ } else {
+ expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_WRITE, true);
}
EXPECT_CALL(*mock_image_ctx.aio_work_queue, block_writes(_))
.WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
typedef RefreshRequest<MockRefreshImageCtx> MockRefreshRequest;
typedef RefreshParentRequest<MockRefreshImageCtx> MockRefreshParentRequest;
- void expect_is_lock_required(MockRefreshImageCtx &mock_image_ctx, bool require_lock) {
- EXPECT_CALL(*mock_image_ctx.aio_work_queue, is_lock_required()).WillOnce(Return(require_lock));
- }
-
- void expect_set_require_lock_on_read(MockRefreshImageCtx &mock_image_ctx) {
- EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock_on_read());
- }
-
- void expect_clear_require_lock_on_read(MockRefreshImageCtx &mock_image_ctx) {
- EXPECT_CALL(*mock_image_ctx.aio_work_queue, clear_require_lock_on_read());
+ void expect_set_require_lock(MockRefreshImageCtx &mock_image_ctx,
+ AioDirection direction, bool enabled) {
+ EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock(direction,
+ enabled));
}
void expect_v1_read_header(MockRefreshImageCtx &mock_image_ctx, int r) {
expect_get_mutable_metadata(mock_image_ctx, 0);
expect_get_flags(mock_image_ctx, 0);
expect_refresh_parent_is_required(mock_refresh_parent_request, false);
- expect_clear_require_lock_on_read(mock_image_ctx);
expect_shut_down_exclusive_lock(mock_image_ctx, *mock_exclusive_lock, 0);
C_SaferCond ctx;
ASSERT_EQ(0, ctx.wait());
}
-TEST_F(TestMockImageRefreshRequest, ExclusiveLockWithCoR) {
- REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
-
- REQUIRE(!is_feature_enabled(RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF | RBD_FEATURE_JOURNALING))
-
- std::string val;
- ASSERT_EQ(0, _rados.conf_get("rbd_clone_copy_on_read", val));
- if (val == "false") {
- std::cout << "SKIPPING due to disabled rbd_copy_on_read" << std::endl;
- return;
- }
-
- librbd::ImageCtx *ictx;
- ASSERT_EQ(0, open_image(m_image_name, &ictx));
-
- MockRefreshImageCtx mock_image_ctx(*ictx);
- MockRefreshParentRequest mock_refresh_parent_request;
-
- MockExclusiveLock mock_exclusive_lock;
- mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
-
- expect_op_work_queue(mock_image_ctx);
- expect_test_features(mock_image_ctx);
-
- InSequence seq;
- expect_get_mutable_metadata(mock_image_ctx, 0);
- expect_get_flags(mock_image_ctx, 0);
- expect_refresh_parent_is_required(mock_refresh_parent_request, false);
- expect_is_lock_required(mock_image_ctx, true);
- expect_set_require_lock_on_read(mock_image_ctx);
-
- C_SaferCond ctx;
- MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
- req->send();
-
- ASSERT_EQ(0, ctx.wait());
-}
-
TEST_F(TestMockImageRefreshRequest, EnableJournalWithExclusiveLock) {
REQUIRE_FEATURE(RBD_FEATURE_JOURNALING);
expect_get_mutable_metadata(mock_image_ctx, 0);
expect_get_flags(mock_image_ctx, 0);
expect_refresh_parent_is_required(mock_refresh_parent_request, false);
- expect_set_require_lock_on_read(mock_image_ctx);
+ expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_BOTH, true);
C_SaferCond ctx;
MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
expect_get_flags(mock_image_ctx, 0);
expect_refresh_parent_is_required(mock_refresh_parent_request, false);
expect_block_writes(mock_image_ctx, 0);
- expect_clear_require_lock_on_read(mock_image_ctx);
+ if (!mock_image_ctx.clone_copy_on_read) {
+ expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_READ, false);
+ }
expect_close_journal(mock_image_ctx, *mock_journal, 0);
expect_unblock_writes(mock_image_ctx);
#define CEPH_TEST_LIBRBD_MOCK_AIO_IMAGE_REQUEST_WQ_H
#include "gmock/gmock.h"
+#include "librbd/AioImageRequestWQ.h"
class Context;
MOCK_METHOD1(block_writes, void(Context *));
MOCK_METHOD0(unblock_writes, void());
- MOCK_METHOD0(set_require_lock_on_read, void());
- MOCK_METHOD0(clear_require_lock_on_read, void());
+ MOCK_METHOD2(set_require_lock, void(AioDirection, bool));
- MOCK_CONST_METHOD0(is_lock_required, bool());
MOCK_CONST_METHOD0(is_lock_request_needed, bool());
};
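
A minimal sketch of exercising the two-argument mock in a test body (the mock class name MockAioImageRequestWQ and the fixture details are assumptions):

    MockAioImageRequestWQ mock_work_queue;
    EXPECT_CALL(mock_work_queue, set_require_lock(AIO_DIRECTION_WRITE, true));
    mock_work_queue.set_require_lock(AIO_DIRECTION_WRITE, true);  // satisfies the expectation
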
.WillRepeatedly(Return(watch_handle));
}
- void expect_set_require_lock_on_read(MockExclusiveLockImageCtx &mock_image_ctx) {
- EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock_on_read());
- }
-
- void expect_clear_require_lock_on_read(MockExclusiveLockImageCtx &mock_image_ctx) {
- EXPECT_CALL(*mock_image_ctx.aio_work_queue, clear_require_lock_on_read());
+ void expect_set_require_lock(MockExclusiveLockImageCtx &mock_image_ctx,
+ AioDirection direction, bool enabled) {
+ EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock(direction,
+ enabled));
}
void expect_block_writes(MockExclusiveLockImageCtx &mock_image_ctx) {
+ if (mock_image_ctx.clone_copy_on_read ||
+ (mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
+ expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_BOTH, true);
+ } else {
+ expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_WRITE, true);
+ }
+
EXPECT_CALL(*mock_image_ctx.aio_work_queue, block_writes(_))
.WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));
- if ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
- expect_set_require_lock_on_read(mock_image_ctx);
- }
}
void expect_unblock_writes(MockExclusiveLockImageCtx &mock_image_ctx) {
- expect_clear_require_lock_on_read(mock_image_ctx);
+ expect_set_require_lock(mock_image_ctx, AIO_DIRECTION_BOTH, false);
EXPECT_CALL(*mock_image_ctx.aio_work_queue, unblock_writes());
}