// object map and journaling
assert(m_exclusive_lock == nullptr);
m_exclusive_lock = m_image_ctx.exclusive_lock;
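+ // the exclusive lock is being shut down, so reads must no longer block waiting to acquire it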
+ m_image_ctx.aio_work_queue->clear_require_lock_on_read();
} else {
if (m_exclusive_lock != nullptr) {
assert(m_image_ctx.exclusive_lock == nullptr);
if (m_object_map != nullptr) {
std::swap(m_object_map, m_image_ctx.object_map);
}
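+ // copy-on-read can turn a read into a write to the clone, so reads must also wait for the exclusive lock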
+ if (m_image_ctx.clone_copy_on_read &&
+ m_image_ctx.aio_work_queue->is_lock_required()) {
+ m_image_ctx.aio_work_queue->set_require_lock_on_read();
+ }
}
}
}
typedef RefreshRequest<MockRefreshImageCtx> MockRefreshRequest;
typedef RefreshParentRequest<MockRefreshImageCtx> MockRefreshParentRequest;
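+ // expectation helper: stub the mocked AIO work queue's is_lock_required() query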
+ void expect_is_lock_required(MockRefreshImageCtx &mock_image_ctx, bool require_lock) {
+ EXPECT_CALL(*mock_image_ctx.aio_work_queue, is_lock_required()).WillOnce(Return(require_lock));
+ }
+
void expect_set_require_lock_on_read(MockRefreshImageCtx &mock_image_ctx) {
EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock_on_read());
}
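+ // assumed helper, mirroring expect_set_require_lock_on_read, for the
+ // clear_require_lock_on_read() expectation used below
+ void expect_clear_require_lock_on_read(MockRefreshImageCtx &mock_image_ctx) {
+ EXPECT_CALL(*mock_image_ctx.aio_work_queue, clear_require_lock_on_read());
+ }
+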
expect_get_mutable_metadata(mock_image_ctx, 0);
expect_get_flags(mock_image_ctx, 0);
expect_refresh_parent_is_required(mock_refresh_parent_request, false);
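+ // shutting down the exclusive lock should also clear the read-lock requirement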
+ expect_clear_require_lock_on_read(mock_image_ctx);
expect_shut_down_exclusive_lock(mock_image_ctx, *mock_exclusive_lock, 0);
C_SaferCond ctx;
ASSERT_EQ(0, ctx.wait());
}
+TEST_F(TestMockImageRefreshRequest, ExclusiveLockWithCoR) {
+ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
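+ // copy-on-read is controlled by rbd_clone_copy_on_read, so skip when it is disabled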
+ std::string val;
+ ASSERT_EQ(0, _rados.conf_get("rbd_clone_copy_on_read", val));
+ if (val == "false") {
+ std::cout << "SKIPPING due to disabled rbd_copy_on_read" << std::endl;
+ return;
+ }
+
+ librbd::ImageCtx *ictx;
+ ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+ MockRefreshImageCtx mock_image_ctx(*ictx);
+ MockRefreshParentRequest mock_refresh_parent_request;
+
+ MockExclusiveLock mock_exclusive_lock;
+ mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
+
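+ // disable optional features so the refresh only needs to handle the exclusive lock path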
+ if (ictx->test_features(RBD_FEATURE_JOURNALING)) {
+ ASSERT_EQ(0, ictx->operations->update_features(RBD_FEATURE_JOURNALING,
+ false));
+ }
+
+ if (ictx->test_features(RBD_FEATURE_FAST_DIFF)) {
+ ASSERT_EQ(0, ictx->operations->update_features(RBD_FEATURE_FAST_DIFF,
+ false));
+ }
+
+ if (ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
+ ASSERT_EQ(0, ictx->operations->update_features(RBD_FEATURE_OBJECT_MAP,
+ false));
+ }
+
+ expect_op_work_queue(mock_image_ctx);
+ expect_test_features(mock_image_ctx);
+
+ InSequence seq;
+ expect_get_mutable_metadata(mock_image_ctx, 0);
+ expect_get_flags(mock_image_ctx, 0);
+ expect_get_group(mock_image_ctx, 0);
+ expect_refresh_parent_is_required(mock_refresh_parent_request, false);
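+ // refresh should re-assert require_lock_on_read since the lock is still required and copy-on-read is enabled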
+ expect_is_lock_required(mock_image_ctx, true);
+ expect_set_require_lock_on_read(mock_image_ctx);
+
+ C_SaferCond ctx;
+ MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx);
+ req->send();
+
+ ASSERT_EQ(0, ctx.wait());
+}
+
TEST_F(TestMockImageRefreshRequest, EnableJournalWithExclusiveLock) {
REQUIRE_FEATURE(RBD_FEATURE_JOURNALING);
object_set(image_ctx.object_set),
old_format(image_ctx.old_format),
read_only(image_ctx.read_only),
+ clone_copy_on_read(image_ctx.clone_copy_on_read),
lockers(image_ctx.lockers),
exclusive_locked(image_ctx.exclusive_locked),
lock_tag(image_ctx.lock_tag),
bool old_format;
bool read_only;
+ bool clone_copy_on_read;
+
std::map<rados::cls::lock::locker_id_t,
rados::cls::lock::locker_info_t> lockers;
bool exclusive_locked;