From 43b5c480ee8ea38f67d18c917853f70c1b226d36 Mon Sep 17 00:00:00 2001
From: Jason Dillaman <dillaman@redhat.com>
Date: Wed, 29 Apr 2020 19:17:12 -0400
Subject: [PATCH] librbd: exclusive lock image IO dispatch layer

This layer handles acquiring the exclusive lock, when required, for
incoming IO requests.

Signed-off-by: Jason Dillaman <dillaman@redhat.com>
---
 src/librbd/CMakeLists.txt                     |   1 +
 src/librbd/ExclusiveLock.cc                   | 129 ++++----
 src/librbd/ExclusiveLock.h                    |  13 +-
 src/librbd/exclusive_lock/ImageDispatch.cc    | 297 ++++++++++++++++++
 src/librbd/exclusive_lock/ImageDispatch.h     | 110 +++++++
 .../exclusive_lock/PreReleaseRequest.cc       |  39 ++-
 src/librbd/exclusive_lock/PreReleaseRequest.h |  13 +-
 src/librbd/image/RefreshRequest.cc            |  12 +-
 .../test_mock_PreReleaseRequest.cc            | 113 ++++---
 .../librbd/image/test_mock_RefreshRequest.cc  |  20 +-
 src/test/librbd/mock/MockExclusiveLock.h      |   4 +
 src/test/librbd/mock/io/MockImageDispatch.h   |  81 +++++
 src/test/librbd/test_mock_ExclusiveLock.cc    | 147 +++++++--
 13 files changed, 819 insertions(+), 160 deletions(-)
 create mode 100644 src/librbd/exclusive_lock/ImageDispatch.cc
 create mode 100644 src/librbd/exclusive_lock/ImageDispatch.h
 create mode 100644 src/test/librbd/mock/io/MockImageDispatch.h

diff --git a/src/librbd/CMakeLists.txt b/src/librbd/CMakeLists.txt
index c7fe73f02ab3e..0ecd0258c930f 100644
--- a/src/librbd/CMakeLists.txt
+++ b/src/librbd/CMakeLists.txt
@@ -48,6 +48,7 @@ set(librbd_internal_srcs
   deep_copy/SnapshotCreateRequest.cc
   deep_copy/Utils.cc
   exclusive_lock/AutomaticPolicy.cc
+  exclusive_lock/ImageDispatch.cc
   exclusive_lock/PreAcquireRequest.cc
   exclusive_lock/PostAcquireRequest.cc
   exclusive_lock/PreReleaseRequest.cc
diff --git a/src/librbd/ExclusiveLock.cc b/src/librbd/ExclusiveLock.cc
index 1250c733d676b..8568ae957a86b 100644
--- a/src/librbd/ExclusiveLock.cc
+++ b/src/librbd/ExclusiveLock.cc
@@ -5,9 +5,11 @@
 #include "librbd/ImageCtx.h"
 #include "librbd/ImageWatcher.h"
 #include "librbd/ImageState.h"
+#include "librbd/exclusive_lock/ImageDispatch.h"
 #include "librbd/exclusive_lock/PreAcquireRequest.h"
 #include "librbd/exclusive_lock/PostAcquireRequest.h"
 #include "librbd/exclusive_lock/PreReleaseRequest.h"
+#include "librbd/io/ImageDispatcher.h"
 #include "librbd/io/ImageRequestWQ.h"
 #include "librbd/Utils.h"
 #include "common/ceph_mutex.h"
@@ -71,6 +73,17 @@ bool ExclusiveLock<I>::accept_ops(const ceph::mutex &lock) const {
           (ML::is_state_locked() || ML::is_state_post_acquiring()));
 }
 
+template <typename I>
+void ExclusiveLock<I>::set_require_lock(io::Direction direction,
+                                        Context* on_finish) {
+  m_image_dispatch->set_require_lock(direction, on_finish);
+}
+
+template <typename I>
+void ExclusiveLock<I>::unset_require_lock(io::Direction direction) {
+  m_image_dispatch->unset_require_lock(direction);
+}
+
 template <typename I>
 void ExclusiveLock<I>::block_requests(int r) {
   std::lock_guard locker{ML::m_lock};
@@ -117,8 +130,10 @@ void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
     ML::set_state_initializing();
   }
 
-  m_image_ctx.io_work_queue->block_writes(new C_InitComplete(this, features,
-                                                             on_init));
+  auto ctx = new LambdaContext([this, features, on_init](int r) {
+      handle_init_complete(r, features, on_init);
+    });
+  m_image_ctx.io_work_queue->block_writes(ctx);
 }
 
 template <typename I>
@@ -164,21 +179,36 @@ Context *ExclusiveLock<I>::start_op(int* ret_val) {
 }
 
 template <typename I>
-void ExclusiveLock<I>::handle_init_complete(uint64_t features) {
+void ExclusiveLock<I>::handle_init_complete(int r, uint64_t features,
+                                            Context* on_finish) {
+  if (r < 0) {
+    m_image_ctx.io_work_queue->unblock_writes();
+    on_finish->complete(r);
+    return;
+  }
+
   ldout(m_image_ctx.cct, 10) << ": features=" << features << dendl;
 
-  {
-    std::shared_lock owner_locker{m_image_ctx.owner_lock};
-    if (m_image_ctx.clone_copy_on_read ||
-        (features & RBD_FEATURE_JOURNALING) != 0) {
-      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, true);
-    } else {
-      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_WRITE, true);
-    }
-  }
+  m_image_dispatch = exclusive_lock::ImageDispatch<I>::create(&m_image_ctx);
+  m_image_ctx.io_image_dispatcher->register_dispatch(m_image_dispatch);
 
-  std::lock_guard locker{ML::m_lock};
-  ML::set_state_unlocked();
+  on_finish = new LambdaContext([this, on_finish](int r) {
+      m_image_ctx.io_work_queue->unblock_writes();
+
+      {
+        std::lock_guard locker{ML::m_lock};
+        ML::set_state_unlocked();
+      }
+
+      on_finish->complete(r);
+    });
+
+  if (m_image_ctx.clone_copy_on_read ||
+      (features & RBD_FEATURE_JOURNALING) != 0) {
+    m_image_dispatch->set_require_lock(io::DIRECTION_BOTH, on_finish);
+  } else {
+    m_image_dispatch->set_require_lock(io::DIRECTION_WRITE, on_finish);
+  }
 }
 
 template <typename I>
@@ -187,12 +217,15 @@ void ExclusiveLock<I>::shutdown_handler(int r, Context *on_finish) {
 
   {
     std::unique_lock owner_locker{m_image_ctx.owner_lock};
-    m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
     m_image_ctx.exclusive_lock = nullptr;
   }
 
-  m_image_ctx.io_work_queue->unblock_writes();
-  m_image_ctx.image_watcher->flush(on_finish);
+  on_finish = new LambdaContext([this, on_finish](int r) {
+      m_image_dispatch = nullptr;
+      m_image_ctx.image_watcher->flush(on_finish);
+    });
+  m_image_ctx.io_image_dispatcher->shut_down_dispatch(
+    m_image_dispatch->get_dispatch_layer(), on_finish);
 }
 
 template <typename I>
@@ -287,21 +320,24 @@ void ExclusiveLock<I>::handle_post_acquired_lock(int r) {
   Context *on_finish = nullptr;
   {
     std::lock_guard locker{ML::m_lock};
-    ceph_assert(ML::is_state_acquiring() || ML::is_state_post_acquiring());
+    ceph_assert(ML::is_state_acquiring() ||
+                ML::is_state_post_acquiring());
     assert (m_pre_post_callback != nullptr);
     std::swap(m_pre_post_callback, on_finish);
   }
 
-  if (r >= 0) {
-    m_image_ctx.perfcounter->tset(l_librbd_lock_acquired_time,
-                                  ceph_clock_now());
-    m_image_ctx.image_watcher->notify_acquired_lock();
-    m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
-    m_image_ctx.io_work_queue->unblock_writes();
+  if (r < 0) {
+    on_finish->complete(r);
+    return;
   }
 
-  on_finish->complete(r);
+  m_image_ctx.perfcounter->tset(l_librbd_lock_acquired_time,
+                                ceph_clock_now());
+  m_image_ctx.image_watcher->notify_acquired_lock();
+  m_image_dispatch->unset_require_lock(io::DIRECTION_BOTH);
+
+  on_finish->complete(0);
 }
 
 template <typename I>
@@ -310,8 +346,9 @@ void ExclusiveLock<I>::pre_release_lock_handler(bool shutting_down,
   ldout(m_image_ctx.cct, 10) << dendl;
 
   std::lock_guard locker{ML::m_lock};
-  PreReleaseRequest<I> *req = PreReleaseRequest<I>::create(
-    m_image_ctx, shutting_down, m_async_op_tracker, on_finish);
+  auto req = PreReleaseRequest<I>::create(
+    m_image_ctx, m_image_dispatch, shutting_down, m_async_op_tracker,
+    on_finish);
   m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
     req->send();
   }));
@@ -325,27 +362,29 @@ void ExclusiveLock<I>::post_release_lock_handler(bool shutting_down, int r,
   if (!shutting_down) {
     {
       std::lock_guard locker{ML::m_lock};
-      ceph_assert(ML::is_state_pre_releasing() || ML::is_state_releasing());
+      ceph_assert(ML::is_state_pre_releasing() ||
+                  ML::is_state_releasing());
     }
 
     if (r >= 0) {
       m_image_ctx.image_watcher->notify_released_lock();
     }
+
+    on_finish->complete(r);
   } else {
     {
       std::unique_lock owner_locker{m_image_ctx.owner_lock};
-      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
       m_image_ctx.exclusive_lock = nullptr;
     }
 
-    if (r >= 0) {
-      m_image_ctx.io_work_queue->unblock_writes();
-    }
-
-    m_image_ctx.image_watcher->notify_released_lock();
+    on_finish = new LambdaContext([this, r, on_finish](int) {
+        m_image_dispatch = nullptr;
+        m_image_ctx.image_watcher->notify_released_lock();
+        on_finish->complete(r);
+      });
+    m_image_ctx.io_image_dispatcher->shut_down_dispatch(
+      m_image_dispatch->get_dispatch_layer(), on_finish);
   }
-
-  on_finish->complete(r);
 }
 
 template <typename I>
@@ -358,24 +397,6 @@ void ExclusiveLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
   on_finish->complete(r);
 }
 
-template <typename I>
-struct ExclusiveLock<I>::C_InitComplete : public Context {
-  ExclusiveLock *exclusive_lock;
-  uint64_t features;
-  Context *on_init;
-
-  C_InitComplete(ExclusiveLock *exclusive_lock, uint64_t features,
-                 Context *on_init)
-    : exclusive_lock(exclusive_lock), features(features), on_init(on_init) {
-  }
-  void finish(int r) override {
-    if (r == 0) {
-      exclusive_lock->handle_init_complete(features);
-    }
-    on_init->complete(r);
-  }
-};
-
 } // namespace librbd
 
 template class librbd::ExclusiveLock<librbd::ImageCtx>;
diff --git a/src/librbd/ExclusiveLock.h b/src/librbd/ExclusiveLock.h
index a77ea817a48a0..b77c961e2f0cc 100644
--- a/src/librbd/ExclusiveLock.h
+++ b/src/librbd/ExclusiveLock.h
@@ -7,10 +7,15 @@
 #include "common/AsyncOpTracker.h"
 #include "librbd/ManagedLock.h"
 #include "librbd/exclusive_lock/Policy.h"
+#include "librbd/io/Types.h"
 #include "common/RefCountedObj.h"
 
+struct Context;
+
 namespace librbd {
 
+namespace exclusive_lock { template <typename> struct ImageDispatch; }
+
 template <typename ImageCtxT = ImageCtx>
 class ExclusiveLock : public RefCountedObject,
                       public ManagedLock<ImageCtxT> {
@@ -25,6 +30,9 @@ public:
               int *ret_val) const;
   bool accept_ops() const;
 
+  void set_require_lock(io::Direction direction, Context* on_finish);
+  void unset_require_lock(io::Direction direction);
+
   void block_requests(int r);
   void unblock_requests();
 
@@ -86,9 +94,8 @@ private:
    * @endverbatim
    */
 
-  struct C_InitComplete;
-
   ImageCtxT& m_image_ctx;
+  exclusive_lock::ImageDispatch<ImageCtxT>* m_image_dispatch = nullptr;
   Context *m_pre_post_callback = nullptr;
 
   AsyncOpTracker m_async_op_tracker;
@@ -100,7 +107,7 @@ private:
 
   bool accept_ops(const ceph::mutex &lock) const;
 
-  void handle_init_complete(uint64_t features);
+  void handle_init_complete(int r, uint64_t features, Context* on_finish);
   void handle_post_acquiring_lock(int r);
   void handle_post_acquired_lock(int r);
 };
diff --git a/src/librbd/exclusive_lock/ImageDispatch.cc b/src/librbd/exclusive_lock/ImageDispatch.cc
new file mode 100644
index 0000000000000..f119d60af2ced
--- /dev/null
+++ b/src/librbd/exclusive_lock/ImageDispatch.cc
@@ -0,0 +1,297 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "librbd/exclusive_lock/ImageDispatch.h"
+#include "common/dout.h"
+#include "common/errno.h"
+#include "common/WorkQueue.h"
+#include "librbd/ExclusiveLock.h"
+#include "librbd/ImageCtx.h"
+#include "librbd/Utils.h"
+#include "librbd/exclusive_lock/Policy.h"
+#include "librbd/io/ImageRequestWQ.h"
+
+#define dout_subsys ceph_subsys_rbd
+#undef dout_prefix
+#define dout_prefix *_dout << "librbd::exclusive_lock::ImageDispatch: " \
+                           << this << " " << __func__ << ": "
+
+namespace librbd {
+namespace exclusive_lock {
+
+using util::create_context_callback;
+
+template <typename I>
+ImageDispatch<I>::ImageDispatch(I* image_ctx)
+  : m_image_ctx(image_ctx),
+    m_lock(ceph::make_shared_mutex(
+      util::unique_lock_name("librbd::exclusive_lock::ImageDispatch::m_lock",
+                             this))) {
+}
+
+template <typename I>
+void ImageDispatch<I>::shut_down(Context* on_finish) {
+  // TODO
+  unset_require_lock(io::DIRECTION_BOTH);
+
+  on_finish->complete(0);
+}
+
+template <typename I>
+void ImageDispatch<I>::set_require_lock(io::Direction direction,
+                                        Context* on_finish) {
+  bool blocked = set_require_lock(direction, true);
+
+  // TODO
+  if (blocked) {
+    std::shared_lock owner_locker{m_image_ctx->owner_lock};
+    m_image_ctx->io_work_queue->block_writes(on_finish);
+  } else {
+    on_finish->complete(0);
+  }
+}
+
+template <typename I>
+void ImageDispatch<I>::unset_require_lock(io::Direction direction) {
+  bool unblocked = set_require_lock(direction, false);
+
+  // TODO
+  if (unblocked) {
+    m_image_ctx->io_work_queue->unblock_writes();
+  }
+}
+
+template <typename I>
+bool ImageDispatch<I>::set_require_lock(io::Direction direction, bool enabled) {
+  auto cct = m_image_ctx->cct;
+  ldout(cct, 20) << "direction=" << direction << ", enabled=" << enabled
+                 << dendl;
+
+  // TODO remove when ImageRequestWQ is removed
+  m_image_ctx->io_work_queue->set_require_lock(direction, enabled);
+
+  std::unique_lock locker{m_lock};
+  auto prev_require_lock = (m_require_lock_on_read || m_require_lock_on_write);
+
+  switch (direction) {
+  case io::DIRECTION_READ:
+    m_require_lock_on_read = enabled;
+    break;
+  case io::DIRECTION_WRITE:
+    m_require_lock_on_write = enabled;
+    break;
+  case io::DIRECTION_BOTH:
+    m_require_lock_on_read = enabled;
+    m_require_lock_on_write = enabled;
+    break;
+  }
+
+  bool require_lock = (m_require_lock_on_read || m_require_lock_on_write);
+  return ((enabled && !prev_require_lock && require_lock) ||
+          (!enabled && prev_require_lock && !require_lock));
+}
+
+template <typename I>
+bool ImageDispatch<I>::read(
+    io::AioCompletion* aio_comp, io::Extents &&image_extents,
+    io::ReadResult &&read_result, int op_flags,
+    const ZTracer::Trace &parent_trace, uint64_t tid,
+    std::atomic<uint32_t>* image_dispatch_flags,
+    io::DispatchResult* dispatch_result, Context* on_dispatched) {
+  auto cct = m_image_ctx->cct;
+  ldout(cct, 20) << "image_extents=" << image_extents << dendl;
+
+  if (needs_exclusive_lock(true, dispatch_result, on_dispatched)) {
+    return true;
+  }
+
+  return false;
+}
+
+template <typename I>
+bool ImageDispatch<I>::write(
+    io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
+    int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
+    std::atomic<uint32_t>* image_dispatch_flags,
+    io::DispatchResult* dispatch_result, Context* on_dispatched) {
+  auto cct = m_image_ctx->cct;
+  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
+                 << dendl;
+
+  if (needs_exclusive_lock(false, dispatch_result, on_dispatched)) {
+    return true;
+  }
+
+  return false;
+}
+
+template <typename I>
+bool ImageDispatch<I>::discard(
+    io::AioCompletion* aio_comp, io::Extents &&image_extents,
+    uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
+    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
+    io::DispatchResult* dispatch_result, Context* on_dispatched) {
+  auto cct = m_image_ctx->cct;
+  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
+                 << dendl;
+
+  if (needs_exclusive_lock(false, dispatch_result, on_dispatched)) {
+    return true;
+  }
+
+  return false;
+}
+
+template <typename I>
+bool ImageDispatch<I>::write_same(
+    io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
+    int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
+    std::atomic<uint32_t>* image_dispatch_flags,
+    io::DispatchResult* dispatch_result, Context* on_dispatched) {
+  auto cct = m_image_ctx->cct;
+  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
+                 << dendl;
+
+  if (needs_exclusive_lock(false, dispatch_result, on_dispatched)) {
+    return true;
+  }
+
+  return false;
+}
+
+template <typename I>
+bool ImageDispatch<I>::compare_and_write(
+    io::AioCompletion* aio_comp, io::Extents &&image_extents,
+    bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
+    int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
+    std::atomic<uint32_t>* image_dispatch_flags,
+    io::DispatchResult* dispatch_result, Context* on_dispatched) {
+  auto cct = m_image_ctx->cct;
+  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
+                 << dendl;
+
+  if (needs_exclusive_lock(false, dispatch_result, on_dispatched)) {
+    return true;
+  }
+
+  return false;
+}
+
+template <typename I>
+bool ImageDispatch<I>::flush(
+    io::AioCompletion* aio_comp, io::FlushSource flush_source,
+    const ZTracer::Trace &parent_trace, uint64_t tid,
+    std::atomic<uint32_t>* image_dispatch_flags,
+    io::DispatchResult* dispatch_result, Context* on_dispatched) {
+  auto cct = m_image_ctx->cct;
+  ldout(cct, 20) << "tid=" << tid << dendl;
+
+  // don't attempt to grab the exclusive lock if we are just internally
+  // clearing out our in-flight IO queue
+  if (flush_source != io::FLUSH_SOURCE_USER) {
+    return false;
+  }
+
+  if (needs_exclusive_lock(false, dispatch_result, on_dispatched)) {
+    return true;
+  }
+
+  return false;
+}
+
+template <typename I>
+bool ImageDispatch<I>::is_lock_required(bool read_op) const {
+  ceph_assert(ceph_mutex_is_locked(m_lock));
+  return ((read_op && m_require_lock_on_read) ||
+          (!read_op && m_require_lock_on_write));
+}
+
+template <typename I>
+bool ImageDispatch<I>::needs_exclusive_lock(bool read_op,
+                                            io::DispatchResult* dispatch_result,
+                                            Context* on_dispatched) {
+  auto cct = m_image_ctx->cct;
+  bool lock_required = false;
+  {
+    std::shared_lock locker{m_lock};
+    lock_required = is_lock_required(read_op);
+  }
+
+  if (lock_required) {
+    std::shared_lock owner_locker{m_image_ctx->owner_lock};
+    if (m_image_ctx->exclusive_lock == nullptr) {
+      // raced with the exclusive lock being disabled
+      return false;
+    }
+
+    ldout(cct, 5) << "exclusive lock required: delaying IO" << dendl;
+    if (!m_image_ctx->get_exclusive_lock_policy()->may_auto_request_lock()) {
+      lderr(cct) << "op requires exclusive lock" << dendl;
+
+      *dispatch_result = io::DISPATCH_RESULT_CONTINUE;
+      on_dispatched->complete(
+        m_image_ctx->exclusive_lock->get_unlocked_op_error());
+      return true;
+    }
+
+    // block potential races with other incoming IOs
+    std::unique_lock locker{m_lock};
+    bool retesting_lock = (
+      !m_on_dispatches.empty() && m_on_dispatches.front() == on_dispatched);
+    if (!m_on_dispatches.empty() && !retesting_lock) {
+      *dispatch_result = io::DISPATCH_RESULT_RESTART;
+      m_on_dispatches.push_back(on_dispatched);
+      return true;
+    }
+
+    if (!is_lock_required(read_op)) {
+      return false;
+    }
+
+    ceph_assert(m_on_dispatches.empty() || retesting_lock);
+    m_on_dispatches.push_back(on_dispatched);
+
+    *dispatch_result = io::DISPATCH_RESULT_RESTART;
+    auto ctx = create_context_callback<
+      ImageDispatch<I>, &ImageDispatch<I>::handle_acquire_lock>(this);
+    m_image_ctx->exclusive_lock->acquire_lock(ctx);
+    return true;
+  }
+
+  return false;
+}
+
+template <typename I>
+void ImageDispatch<I>::handle_acquire_lock(int r) {
+  auto cct = m_image_ctx->cct;
+  ldout(cct, 5) << "r=" << r << dendl;
+
+  std::unique_lock locker{m_lock};
+  ceph_assert(!m_on_dispatches.empty());
+
+  Context* failed_dispatch = nullptr;
+  Contexts on_dispatches;
+  if (r < 0) {
+    lderr(cct) << "failed to acquire exclusive lock: " << cpp_strerror(r)
+               << dendl;
+    failed_dispatch = m_on_dispatches.front();
+    m_on_dispatches.pop_front();
+  } else {
+    // re-test if the lock is still required (i.e. it wasn't acquired) via a
+    // restart dispatch
+    std::swap(on_dispatches, m_on_dispatches);
+  }
+  locker.unlock();
+
+  if (failed_dispatch != nullptr) {
+    failed_dispatch->complete(r);
+  }
+  for (auto ctx : on_dispatches) {
+    ctx->complete(0);
+  }
+}
+
+} // namespace exclusive_lock
+} // namespace librbd
+
+template class librbd::exclusive_lock::ImageDispatch<librbd::ImageCtx>;
diff --git a/src/librbd/exclusive_lock/ImageDispatch.h b/src/librbd/exclusive_lock/ImageDispatch.h
new file mode 100644
index 0000000000000..f7c5b0b9dbc84
--- /dev/null
+++ b/src/librbd/exclusive_lock/ImageDispatch.h
@@ -0,0 +1,110 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H
+#define CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H
+
+#include "librbd/io/ImageDispatchInterface.h"
+#include "include/int_types.h"
+#include "include/buffer.h"
+#include "common/ceph_mutex.h"
+#include "common/zipkin_trace.h"
+#include "librbd/io/ReadResult.h"
+#include "librbd/io/Types.h"
+#include <list>
+
+struct Context;
+
+namespace librbd {
+
+struct ImageCtx;
+
+namespace io { struct AioCompletion; }
+
+namespace exclusive_lock {
+
+template <typename ImageCtxT = ImageCtx>
+class ImageDispatch : public io::ImageDispatchInterface {
+public:
+  static ImageDispatch* create(ImageCtxT* image_ctx) {
+    return new ImageDispatch(image_ctx);
+  }
+  void destroy() {
+    delete this;
+  }
+
+  ImageDispatch(ImageCtxT* image_ctx);
+
+  io::ImageDispatchLayer get_dispatch_layer() const override {
+    return io::IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK;
+  }
+
+  void set_require_lock(io::Direction direction, Context* on_finish);
+  void unset_require_lock(io::Direction direction);
+
+  void shut_down(Context* on_finish) override;
+
+  bool read(
+      io::AioCompletion* aio_comp, io::Extents &&image_extents,
+      io::ReadResult &&read_result, int op_flags,
+      const ZTracer::Trace &parent_trace, uint64_t tid,
+      std::atomic<uint32_t>* image_dispatch_flags,
+      io::DispatchResult* dispatch_result, Context* on_dispatched) override;
+  bool write(
+      io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
+      int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
+      std::atomic<uint32_t>* image_dispatch_flags,
+      io::DispatchResult* dispatch_result, Context* on_dispatched) override;
+  bool discard(
+      io::AioCompletion* aio_comp, io::Extents &&image_extents,
+      uint32_t discard_granularity_bytes,
+      const ZTracer::Trace &parent_trace, uint64_t tid,
+      std::atomic<uint32_t>* image_dispatch_flags,
+      io::DispatchResult* dispatch_result, Context* on_dispatched) override;
+  bool write_same(
+      io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
+      int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
+      std::atomic<uint32_t>* image_dispatch_flags,
+      io::DispatchResult* dispatch_result, Context* on_dispatched) override;
+  bool compare_and_write(
+      io::AioCompletion* aio_comp, io::Extents &&image_extents,
+      bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
+      int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
+      std::atomic<uint32_t>* image_dispatch_flags,
+      io::DispatchResult* dispatch_result, Context* on_dispatched) override;
+  bool flush(
+      io::AioCompletion* aio_comp, io::FlushSource flush_source,
+      const ZTracer::Trace &parent_trace, uint64_t tid,
+      std::atomic<uint32_t>* image_dispatch_flags,
+      io::DispatchResult* dispatch_result, Context* on_dispatched) override;
+
+  void handle_finished(int r, uint64_t tid) override {}
+
+private:
+  typedef std::list<Context*> Contexts;
+
+  ImageCtxT* m_image_ctx;
+  mutable ceph::shared_mutex m_lock;
+
+  bool m_require_lock_on_read = false;
+  bool m_require_lock_on_write = false;
+
+  Contexts m_on_dispatches;
+
+  bool set_require_lock(io::Direction direction, bool enabled);
+
+  bool is_lock_required(bool read_op) const;
+
+  bool needs_exclusive_lock(bool read_op, io::DispatchResult* dispatch_result,
+                            Context* on_dispatched);
+
+  void handle_acquire_lock(int r);
+
+};
+
+} // namespace exclusive_lock
+} // namespace librbd
+
+extern template class librbd::exclusive_lock::ImageDispatch<librbd::ImageCtx>;
+
+#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_IMAGE_DISPATCH_H
diff --git a/src/librbd/exclusive_lock/PreReleaseRequest.cc b/src/librbd/exclusive_lock/PreReleaseRequest.cc
index 571af20949330..a0227670fe82f 100644
--- a/src/librbd/exclusive_lock/PreReleaseRequest.cc
+++ b/src/librbd/exclusive_lock/PreReleaseRequest.cc
@@ -11,6 +11,7 @@
 #include "librbd/Journal.h"
 #include "librbd/ObjectMap.h"
 #include "librbd/Utils.h"
+#include "librbd/exclusive_lock/ImageDispatch.h"
 #include "librbd/io/ImageRequestWQ.h"
 #include "librbd/io/ObjectDispatcherInterface.h"
 
@@ -27,18 +28,20 @@ using util::create_context_callback;
 
 template <typename I>
 PreReleaseRequest<I>* PreReleaseRequest<I>::create(
-    I &image_ctx, bool shutting_down, AsyncOpTracker &async_op_tracker,
-    Context *on_finish) {
-  return new PreReleaseRequest(image_ctx, shutting_down, async_op_tracker,
-                               on_finish);
+    I &image_ctx, ImageDispatch<I>* image_dispatch, bool shutting_down,
+    AsyncOpTracker &async_op_tracker, Context *on_finish) {
+  return new PreReleaseRequest(image_ctx, image_dispatch, shutting_down,
+                               async_op_tracker, on_finish);
 }
 
 template <typename I>
-PreReleaseRequest<I>::PreReleaseRequest(I &image_ctx, bool shutting_down,
+PreReleaseRequest<I>::PreReleaseRequest(I &image_ctx,
+                                        ImageDispatch<I>* image_dispatch,
+                                        bool shutting_down,
                                         AsyncOpTracker &async_op_tracker,
                                         Context *on_finish)
-  : m_image_ctx(image_ctx), m_shutting_down(shutting_down),
-    m_async_op_tracker(async_op_tracker),
+  : m_image_ctx(image_ctx), m_image_dispatch(image_dispatch),
+    m_shutting_down(shutting_down), m_async_op_tracker(async_op_tracker),
     m_on_finish(create_async_context_callback(image_ctx, on_finish)) {
 }
 
@@ -108,17 +111,13 @@ void PreReleaseRequest<I>::send_block_writes() {
   Context *ctx = create_context_callback<
     klass, &klass::handle_block_writes>(this);
 
-  {
-    std::shared_lock owner_locker{m_image_ctx.owner_lock};
-    // setting the lock as required will automatically cause the IO
-    // queue to re-request the lock if any IO is queued
-    if (m_image_ctx.clone_copy_on_read ||
-        m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
-      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, true);
-    } else {
-      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_WRITE, true);
-    }
-    m_image_ctx.io_work_queue->block_writes(ctx);
+  // setting the lock as required will automatically cause the IO
+  // queue to re-request the lock if any IO is queued
+  if (m_image_ctx.clone_copy_on_read ||
+      m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
+    m_image_dispatch->set_require_lock(io::DIRECTION_BOTH, ctx);
+  } else {
+    m_image_dispatch->set_require_lock(io::DIRECTION_WRITE, ctx);
   }
 }
 
@@ -133,7 +132,7 @@ void PreReleaseRequest<I>::handle_block_writes(int r) {
                << dendl;
   } else if (r < 0) {
     lderr(cct) << "failed to block writes: " << cpp_strerror(r) << dendl;
-
m_image_ctx.io_work_queue->unblock_writes(); + m_image_dispatch->unset_require_lock(io::DIRECTION_BOTH); save_result(r); finish(); return; @@ -180,7 +179,7 @@ void PreReleaseRequest::handle_invalidate_cache(int r) { if (r < 0 && r != -EBLACKLISTED && r != -EBUSY) { lderr(cct) << "failed to invalidate cache: " << cpp_strerror(r) << dendl; - m_image_ctx.io_work_queue->unblock_writes(); + m_image_dispatch->unset_require_lock(io::DIRECTION_BOTH); save_result(r); finish(); return; diff --git a/src/librbd/exclusive_lock/PreReleaseRequest.h b/src/librbd/exclusive_lock/PreReleaseRequest.h index e5b85a8813b21..66d1b0155b0d4 100644 --- a/src/librbd/exclusive_lock/PreReleaseRequest.h +++ b/src/librbd/exclusive_lock/PreReleaseRequest.h @@ -16,10 +16,14 @@ struct ImageCtx; namespace exclusive_lock { +template struct ImageDispatch; + template class PreReleaseRequest { public: - static PreReleaseRequest* create(ImageCtxT &image_ctx, bool shutting_down, + static PreReleaseRequest* create(ImageCtxT &image_ctx, + ImageDispatch* image_dispatch, + bool shutting_down, AsyncOpTracker &async_op_tracker, Context *on_finish); @@ -62,10 +66,13 @@ private: * @endverbatim */ - PreReleaseRequest(ImageCtxT &image_ctx, bool shutting_down, - AsyncOpTracker &async_op_tracker, Context *on_finish); + PreReleaseRequest(ImageCtxT &image_ctx, + ImageDispatch* image_dispatch, + bool shutting_down, AsyncOpTracker &async_op_tracker, + Context *on_finish); ImageCtxT &m_image_ctx; + ImageDispatch* m_image_dispatch; bool m_shutting_down; AsyncOpTracker &m_async_op_tracker; Context *m_on_finish; diff --git a/src/librbd/image/RefreshRequest.cc b/src/librbd/image/RefreshRequest.cc index eea4fd1dc43d6..f7c4ef4df80a6 100644 --- a/src/librbd/image/RefreshRequest.cc +++ b/src/librbd/image/RefreshRequest.cc @@ -902,9 +902,14 @@ void RefreshRequest::send_v2_open_journal() { !journal_disabled_by_policy && m_image_ctx.exclusive_lock != nullptr && m_image_ctx.journal == nullptr) { - m_image_ctx.io_work_queue->set_require_lock(librbd::io::DIRECTION_BOTH, - true); + auto ctx = new LambdaContext([this](int) { + send_v2_block_writes(); + }); + m_image_ctx.exclusive_lock->set_require_lock( + librbd::io::DIRECTION_BOTH, ctx); + return; } + send_v2_block_writes(); return; } @@ -1399,8 +1404,7 @@ void RefreshRequest::apply() { if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING, m_image_ctx.image_lock)) { if (!m_image_ctx.clone_copy_on_read && m_image_ctx.journal != nullptr) { - m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_READ, - false); + m_image_ctx.exclusive_lock->unset_require_lock(io::DIRECTION_READ); } std::swap(m_journal, m_image_ctx.journal); } else if (m_journal != nullptr) { diff --git a/src/test/librbd/exclusive_lock/test_mock_PreReleaseRequest.cc b/src/test/librbd/exclusive_lock/test_mock_PreReleaseRequest.cc index 38110a33d0acd..9812a31b818d4 100644 --- a/src/test/librbd/exclusive_lock/test_mock_PreReleaseRequest.cc +++ b/src/test/librbd/exclusive_lock/test_mock_PreReleaseRequest.cc @@ -9,17 +9,38 @@ #include "test/librbd/mock/io/MockObjectDispatch.h" #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" #include "common/AsyncOpTracker.h" +#include "librbd/exclusive_lock/ImageDispatch.h" #include "librbd/exclusive_lock/PreReleaseRequest.h" #include "gmock/gmock.h" #include "gtest/gtest.h" #include +namespace librbd { + +namespace { + +struct MockTestImageCtx : public MockImageCtx { + MockTestImageCtx(ImageCtx& image_ctx) : MockImageCtx(image_ctx) { + } +}; + +} // anonymous namespace + +namespace exclusive_lock { + 
+template <> +struct ImageDispatch { + MOCK_METHOD2(set_require_lock, void(io::Direction, Context*)); + MOCK_METHOD1(unset_require_lock, void(io::Direction)); +}; + +} // namespace exclusive_lock +} // namespace librbd + // template definitions #include "librbd/exclusive_lock/PreReleaseRequest.cc" -template class librbd::exclusive_lock::PreReleaseRequest; namespace librbd { - namespace exclusive_lock { namespace { @@ -42,78 +63,80 @@ static const std::string TEST_COOKIE("auto 123"); class TestMockExclusiveLockPreReleaseRequest : public TestMockFixture { public: - typedef PreReleaseRequest MockPreReleaseRequest; + typedef ImageDispatch MockImageDispatch; + typedef PreReleaseRequest MockPreReleaseRequest; void expect_complete_context(MockContext &mock_context, int r) { EXPECT_CALL(mock_context, complete(r)); } - void expect_test_features(MockImageCtx &mock_image_ctx, uint64_t features, + void expect_test_features(MockTestImageCtx &mock_image_ctx, uint64_t features, bool enabled) { EXPECT_CALL(mock_image_ctx, test_features(features)) .WillOnce(Return(enabled)); } - void expect_set_require_lock(MockImageCtx &mock_image_ctx, - librbd::io::Direction direction, bool enabled) { - EXPECT_CALL(*mock_image_ctx.io_work_queue, set_require_lock(direction, - enabled)); + void expect_set_require_lock(MockImageDispatch &mock_image_dispatch, + librbd::io::Direction direction, int r) { + EXPECT_CALL(mock_image_dispatch, set_require_lock(direction, _)) + .WillOnce(WithArg<1>(Invoke([r](Context* ctx) { ctx->complete(r); }))); } - void expect_block_writes(MockImageCtx &mock_image_ctx, int r) { + void expect_set_require_lock(MockTestImageCtx &mock_image_ctx, + MockImageDispatch &mock_image_dispatch, int r) { expect_test_features(mock_image_ctx, RBD_FEATURE_JOURNALING, ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0)); if (mock_image_ctx.clone_copy_on_read || (mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) { - expect_set_require_lock(mock_image_ctx, librbd::io::DIRECTION_BOTH, true); + expect_set_require_lock(mock_image_dispatch, librbd::io::DIRECTION_BOTH, + r); } else { - expect_set_require_lock(mock_image_ctx, librbd::io::DIRECTION_WRITE, - true); + expect_set_require_lock(mock_image_dispatch, librbd::io::DIRECTION_WRITE, + r); } - EXPECT_CALL(*mock_image_ctx.io_work_queue, block_writes(_)) - .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue)); } - void expect_unblock_writes(MockImageCtx &mock_image_ctx) { - EXPECT_CALL(*mock_image_ctx.io_work_queue, unblock_writes()); + void expect_unset_require_lock(MockImageDispatch &mock_image_dispatch) { + EXPECT_CALL(mock_image_dispatch, unset_require_lock( + io::DIRECTION_BOTH)); } - void expect_cancel_op_requests(MockImageCtx &mock_image_ctx, int r) { + void expect_cancel_op_requests(MockTestImageCtx &mock_image_ctx, int r) { EXPECT_CALL(mock_image_ctx, cancel_async_requests(_)) .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue)); } - void expect_close_journal(MockImageCtx &mock_image_ctx, + void expect_close_journal(MockTestImageCtx &mock_image_ctx, MockJournal &mock_journal, int r) { EXPECT_CALL(mock_journal, close(_)) .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue)); } - void expect_close_object_map(MockImageCtx &mock_image_ctx, + void expect_close_object_map(MockTestImageCtx &mock_image_ctx, MockObjectMap &mock_object_map) { EXPECT_CALL(mock_object_map, close(_)) .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); } - void expect_invalidate_cache(MockImageCtx 
&mock_image_ctx, + void expect_invalidate_cache(MockTestImageCtx &mock_image_ctx, int r) { EXPECT_CALL(*mock_image_ctx.io_object_dispatcher, invalidate_cache(_)) .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue)); } - void expect_flush_notifies(MockImageCtx &mock_image_ctx) { + void expect_flush_notifies(MockTestImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.image_watcher, flush(_)) .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); } - void expect_prepare_lock(MockImageCtx &mock_image_ctx) { + void expect_prepare_lock(MockTestImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.state, prepare_lock(_)) .WillOnce(Invoke([](Context *on_ready) { on_ready->complete(0); })); } - void expect_handle_prepare_lock_complete(MockImageCtx &mock_image_ctx) { + void expect_handle_prepare_lock_complete(MockTestImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.state, handle_prepare_lock_complete()); } @@ -126,14 +149,15 @@ TEST_F(TestMockExclusiveLockPreReleaseRequest, Success) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); - MockImageCtx mock_image_ctx(*ictx); + MockTestImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); InSequence seq; expect_prepare_lock(mock_image_ctx); expect_cancel_op_requests(mock_image_ctx, 0); - expect_block_writes(mock_image_ctx, 0); + MockImageDispatch mock_image_dispatch; + expect_set_require_lock(mock_image_ctx, mock_image_dispatch, 0); expect_invalidate_cache(mock_image_ctx, 0); expect_flush_notifies(mock_image_ctx); @@ -150,7 +174,7 @@ TEST_F(TestMockExclusiveLockPreReleaseRequest, Success) { C_SaferCond ctx; MockPreReleaseRequest *req = MockPreReleaseRequest::create( - mock_image_ctx, false, m_async_op_tracker, &ctx); + mock_image_ctx, &mock_image_dispatch, false, m_async_op_tracker, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } @@ -161,9 +185,10 @@ TEST_F(TestMockExclusiveLockPreReleaseRequest, SuccessJournalDisabled) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); - MockImageCtx mock_image_ctx(*ictx); + MockTestImageCtx mock_image_ctx(*ictx); - expect_block_writes(mock_image_ctx, 0); + MockImageDispatch mock_image_dispatch; + expect_set_require_lock(mock_image_ctx, mock_image_dispatch, 0); expect_op_work_queue(mock_image_ctx); InSequence seq; @@ -181,7 +206,7 @@ TEST_F(TestMockExclusiveLockPreReleaseRequest, SuccessJournalDisabled) { C_SaferCond ctx; MockPreReleaseRequest *req = MockPreReleaseRequest::create( - mock_image_ctx, false, m_async_op_tracker, &ctx); + mock_image_ctx, &mock_image_dispatch, false, m_async_op_tracker, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } @@ -192,9 +217,10 @@ TEST_F(TestMockExclusiveLockPreReleaseRequest, SuccessObjectMapDisabled) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); - MockImageCtx mock_image_ctx(*ictx); + MockTestImageCtx mock_image_ctx(*ictx); - expect_block_writes(mock_image_ctx, 0); + MockImageDispatch mock_image_dispatch; + expect_set_require_lock(mock_image_ctx, mock_image_dispatch, 0); expect_op_work_queue(mock_image_ctx); InSequence seq; @@ -206,7 +232,7 @@ TEST_F(TestMockExclusiveLockPreReleaseRequest, SuccessObjectMapDisabled) { C_SaferCond release_ctx; C_SaferCond ctx; MockPreReleaseRequest *req = MockPreReleaseRequest::create( - mock_image_ctx, true, m_async_op_tracker, &ctx); + mock_image_ctx, &mock_image_dispatch, true, m_async_op_tracker, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } @@ -217,13 +243,14 @@ TEST_F(TestMockExclusiveLockPreReleaseRequest, 
Blacklisted) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); - MockImageCtx mock_image_ctx(*ictx); + MockTestImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); InSequence seq; expect_prepare_lock(mock_image_ctx); expect_cancel_op_requests(mock_image_ctx, 0); - expect_block_writes(mock_image_ctx, -EBLACKLISTED); + MockImageDispatch mock_image_dispatch; + expect_set_require_lock(mock_image_ctx, mock_image_dispatch, -EBLACKLISTED); expect_invalidate_cache(mock_image_ctx, -EBLACKLISTED); expect_flush_notifies(mock_image_ctx); @@ -240,7 +267,7 @@ TEST_F(TestMockExclusiveLockPreReleaseRequest, Blacklisted) { C_SaferCond ctx; MockPreReleaseRequest *req = MockPreReleaseRequest::create( - mock_image_ctx, false, m_async_op_tracker, &ctx); + mock_image_ctx, &mock_image_dispatch, false, m_async_op_tracker, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } @@ -251,18 +278,19 @@ TEST_F(TestMockExclusiveLockPreReleaseRequest, BlockWritesError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); - MockImageCtx mock_image_ctx(*ictx); + MockTestImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); InSequence seq; expect_cancel_op_requests(mock_image_ctx, 0); - expect_block_writes(mock_image_ctx, -EINVAL); - expect_unblock_writes(mock_image_ctx); + MockImageDispatch mock_image_dispatch; + expect_set_require_lock(mock_image_ctx, mock_image_dispatch, -EINVAL); + expect_unset_require_lock(mock_image_dispatch); C_SaferCond ctx; MockPreReleaseRequest *req = MockPreReleaseRequest::create( - mock_image_ctx, true, m_async_op_tracker, &ctx); + mock_image_ctx, &mock_image_dispatch, true, m_async_op_tracker, &ctx); req->send(); ASSERT_EQ(-EINVAL, ctx.wait()); } @@ -273,20 +301,21 @@ TEST_F(TestMockExclusiveLockPreReleaseRequest, UnlockError) { librbd::ImageCtx *ictx; ASSERT_EQ(0, open_image(m_image_name, &ictx)); - MockImageCtx mock_image_ctx(*ictx); + MockTestImageCtx mock_image_ctx(*ictx); expect_op_work_queue(mock_image_ctx); InSequence seq; expect_cancel_op_requests(mock_image_ctx, 0); - expect_block_writes(mock_image_ctx, 0); + MockImageDispatch mock_image_dispatch; + expect_set_require_lock(mock_image_ctx, mock_image_dispatch, 0); expect_invalidate_cache(mock_image_ctx, 0); expect_flush_notifies(mock_image_ctx); C_SaferCond ctx; MockPreReleaseRequest *req = MockPreReleaseRequest::create( - mock_image_ctx, true, m_async_op_tracker, &ctx); + mock_image_ctx, &mock_image_dispatch, true, m_async_op_tracker, &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); } diff --git a/src/test/librbd/image/test_mock_RefreshRequest.cc b/src/test/librbd/image/test_mock_RefreshRequest.cc index ecec4586cf57f..50c60cb563890 100644 --- a/src/test/librbd/image/test_mock_RefreshRequest.cc +++ b/src/test/librbd/image/test_mock_RefreshRequest.cc @@ -183,10 +183,15 @@ public: ASSERT_EQ(0, ictx->md_ctx.write(ictx->header_oid, hdr, hdr.length(), 0)); } - void expect_set_require_lock(MockRefreshImageCtx &mock_image_ctx, - librbd::io::Direction direction, bool enabled) { - EXPECT_CALL(*mock_image_ctx.io_work_queue, set_require_lock(direction, - enabled)); + void expect_set_require_lock(MockExclusiveLock &mock_exclusive_lock, + librbd::io::Direction direction) { + EXPECT_CALL(mock_exclusive_lock, set_require_lock(direction, _)) + .WillOnce(WithArg<1>(Invoke([](Context* ctx) { ctx->complete(0); }))); + } + + void expect_unset_require_lock(MockExclusiveLock &mock_exclusive_lock, + librbd::io::Direction direction) { + EXPECT_CALL(mock_exclusive_lock, 
unset_require_lock(direction)); } void expect_v1_read_header(MockRefreshImageCtx &mock_image_ctx, int r) { @@ -1163,7 +1168,7 @@ TEST_F(TestMockImageRefreshRequest, EnableJournalWithoutExclusiveLock) { expect_apply_metadata(mock_image_ctx, 0); expect_get_group(mock_image_ctx, 0); expect_refresh_parent_is_required(mock_refresh_parent_request, false); - expect_set_require_lock(mock_image_ctx, librbd::io::DIRECTION_BOTH, true); + expect_set_require_lock(mock_exclusive_lock, librbd::io::DIRECTION_BOTH); C_SaferCond ctx; MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx); @@ -1216,13 +1221,14 @@ TEST_F(TestMockImageRefreshRequest, DisableJournal) { expect_refresh_parent_is_required(mock_refresh_parent_request, false); expect_block_writes(mock_image_ctx, 0); if (!mock_image_ctx.clone_copy_on_read) { - expect_set_require_lock(mock_image_ctx, librbd::io::DIRECTION_READ, false); + expect_unset_require_lock(mock_exclusive_lock, librbd::io::DIRECTION_READ); } expect_close_journal(mock_image_ctx, mock_journal, 0); expect_unblock_writes(mock_image_ctx); C_SaferCond ctx; - MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, &ctx); + MockRefreshRequest *req = new MockRefreshRequest(mock_image_ctx, false, false, + &ctx); req->send(); ASSERT_EQ(0, ctx.wait()); diff --git a/src/test/librbd/mock/MockExclusiveLock.h b/src/test/librbd/mock/MockExclusiveLock.h index 2bf552f3b6899..6df6dbb1e2d35 100644 --- a/src/test/librbd/mock/MockExclusiveLock.h +++ b/src/test/librbd/mock/MockExclusiveLock.h @@ -8,6 +8,7 @@ #include "include/int_types.h" #include "include/rados/librados.hpp" #include "librbd/exclusive_lock/Policy.h" +#include "librbd/io/Types.h" #include "gmock/gmock.h" class Context; @@ -34,6 +35,9 @@ struct MockExclusiveLock { MOCK_METHOD0(accept_ops, bool()); MOCK_METHOD0(get_unlocked_op_error, int()); + MOCK_METHOD2(set_require_lock, void(io::Direction, Context*)); + MOCK_METHOD1(unset_require_lock, void(io::Direction)); + MOCK_METHOD1(start_op, Context*(int*)); void get() {} diff --git a/src/test/librbd/mock/io/MockImageDispatch.h b/src/test/librbd/mock/io/MockImageDispatch.h new file mode 100644 index 0000000000000..80afcec563320 --- /dev/null +++ b/src/test/librbd/mock/io/MockImageDispatch.h @@ -0,0 +1,81 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#ifndef CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCH_H +#define CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCH_H + +#include "gmock/gmock.h" +#include "include/Context.h" +#include "librbd/io/ImageDispatchInterface.h" +#include "librbd/io/Types.h" + +class Context; + +namespace librbd { +namespace io { + +struct MockImageDispatch : public ImageDispatchInterface { +public: + MOCK_CONST_METHOD0(get_dispatch_layer, ImageDispatchLayer()); + + MOCK_METHOD1(shut_down, void(Context*)); + + bool read( + AioCompletion* aio_comp, Extents &&image_extents, + ReadResult &&read_result, int op_flags, + const ZTracer::Trace &parent_trace, uint64_t tid, + std::atomic* image_dispatch_flags, + DispatchResult* dispatch_result, Context* on_dispatched) override { + return false; + } + + bool write( + AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, + int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, + std::atomic* image_dispatch_flags, + DispatchResult* dispatch_result, Context* on_dispatched) override { + return false; + } + + bool discard( + AioCompletion* aio_comp, Extents &&image_extents, + uint32_t discard_granularity_bytes, + const 
ZTracer::Trace &parent_trace, uint64_t tid, + std::atomic* image_dispatch_flags, + DispatchResult* dispatch_result, Context* on_dispatched) override { + return false; + } + + bool write_same( + AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&bl, + int op_flags, const ZTracer::Trace &parent_trace, uint64_t tid, + std::atomic* image_dispatch_flags, + DispatchResult* dispatch_result, Context* on_dispatched) override { + return false; + } + + bool compare_and_write( + AioCompletion* aio_comp, Extents &&image_extents, bufferlist &&cmp_bl, + bufferlist &&bl, uint64_t *mismatch_offset, int op_flags, + const ZTracer::Trace &parent_trace, uint64_t tid, + std::atomic* image_dispatch_flags, + DispatchResult* dispatch_result, Context* on_dispatched) override { + return false; + } + + bool flush( + AioCompletion* aio_comp, FlushSource flush_source, + const ZTracer::Trace &parent_trace, uint64_t tid, + std::atomic* image_dispatch_flags, + DispatchResult* dispatch_result, Context* on_dispatched) override { + return false; + } + + void handle_finished(int r, uint64_t tid) override {}; + +}; + +} // namespace io +} // namespace librbd + +#endif // CEPH_TEST_LIBRBD_MOCK_IO_IMAGE_DISPATCH_H diff --git a/src/test/librbd/test_mock_ExclusiveLock.cc b/src/test/librbd/test_mock_ExclusiveLock.cc index ae523acc25579..767e4492a99eb 100644 --- a/src/test/librbd/test_mock_ExclusiveLock.cc +++ b/src/test/librbd/test_mock_ExclusiveLock.cc @@ -5,8 +5,10 @@ #include "test/librbd/test_support.h" #include "test/librbd/mock/MockImageCtx.h" #include "test/librbd/mock/exclusive_lock/MockPolicy.h" +#include "test/librbd/mock/io/MockImageDispatch.h" #include "librbd/ExclusiveLock.h" #include "librbd/ManagedLock.h" +#include "librbd/exclusive_lock/ImageDispatch.h" #include "librbd/exclusive_lock/PreAcquireRequest.h" #include "librbd/exclusive_lock/PostAcquireRequest.h" #include "librbd/exclusive_lock/PreReleaseRequest.h" @@ -109,6 +111,33 @@ struct BaseRequest { template std::list BaseRequest::s_requests; +template<> +struct ImageDispatch + : public librbd::io::MockImageDispatch { + static ImageDispatch* s_instance; + static ImageDispatch* create(MockExclusiveLockImageCtx*) { + ceph_assert(s_instance != nullptr); + return s_instance; + } + + void destroy() { + } + + ImageDispatch() { + s_instance = this; + } + + io::ImageDispatchLayer get_dispatch_layer() const override { + return io::IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK; + } + + MOCK_METHOD2(set_require_lock, void(io::Direction, Context*)); + MOCK_METHOD1(unset_require_lock, void(io::Direction)); + +}; + +ImageDispatch* ImageDispatch::s_instance = nullptr; + template <> struct PreAcquireRequest : public BaseRequest > { static PreAcquireRequest *create( @@ -126,8 +155,10 @@ struct PostAcquireRequest : public BaseRequest struct PreReleaseRequest : public BaseRequest > { static PreReleaseRequest *create( - MockExclusiveLockImageCtx &image_ctx, bool shutting_down, - AsyncOpTracker &async_op_tracker, Context *on_finish) { + MockExclusiveLockImageCtx &image_ctx, + ImageDispatch* ImageDispatch, + bool shutting_down, AsyncOpTracker &async_op_tracker, + Context *on_finish) { return BaseRequest::create(image_ctx, nullptr, on_finish); } MOCK_METHOD0(send, void()); @@ -156,15 +187,18 @@ using ::testing::DoAll; using ::testing::Invoke; using ::testing::InSequence; using ::testing::Return; +using ::testing::WithArg; class TestMockExclusiveLock : public TestMockFixture { public: typedef ManagedLock MockManagedLock; typedef ExclusiveLock MockExclusiveLock; + typedef 
exclusive_lock::ImageDispatch MockImageDispatch; typedef exclusive_lock::PreAcquireRequest MockPreAcquireRequest; typedef exclusive_lock::PostAcquireRequest MockPostAcquireRequest; typedef exclusive_lock::PreReleaseRequest MockPreReleaseRequest; + void expect_set_state_initializing(MockManagedLock *managed_lock) { EXPECT_CALL(*managed_lock, set_state_initializing()); } @@ -219,28 +253,47 @@ public: .WillOnce(Return(ret_val)); } - void expect_set_require_lock(MockExclusiveLockImageCtx &mock_image_ctx, - io::Direction direction, bool enabled) { - EXPECT_CALL(*mock_image_ctx.io_work_queue, set_require_lock(direction, - enabled)); + void expect_set_require_lock(MockImageDispatch &mock_image_dispatch, + io::Direction direction) { + EXPECT_CALL(mock_image_dispatch, set_require_lock(direction, _)) + .WillOnce(WithArg<1>(Invoke([](Context* ctx) { ctx->complete(0); }))); } - void expect_block_writes(MockExclusiveLockImageCtx &mock_image_ctx) { - EXPECT_CALL(*mock_image_ctx.io_work_queue, block_writes(_)) - .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); + void expect_set_require_lock(MockExclusiveLockImageCtx &mock_image_ctx, + MockImageDispatch &mock_image_dispatch) { if (mock_image_ctx.clone_copy_on_read || (mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) { - expect_set_require_lock(mock_image_ctx, io::DIRECTION_BOTH, true); + expect_set_require_lock(mock_image_dispatch, io::DIRECTION_BOTH); } else { - expect_set_require_lock(mock_image_ctx, io::DIRECTION_WRITE, true); + expect_set_require_lock(mock_image_dispatch, io::DIRECTION_WRITE); } } + void expect_unset_require_lock(MockImageDispatch &mock_image_dispatch) { + EXPECT_CALL(mock_image_dispatch, unset_require_lock(io::DIRECTION_BOTH)); + } + + void expect_block_writes(MockExclusiveLockImageCtx &mock_image_ctx, + MockImageDispatch& mock_image_dispatch) { + EXPECT_CALL(*mock_image_ctx.io_work_queue, block_writes(_)) + .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); + } + void expect_unblock_writes(MockExclusiveLockImageCtx &mock_image_ctx) { - expect_set_require_lock(mock_image_ctx, io::DIRECTION_BOTH, false); EXPECT_CALL(*mock_image_ctx.io_work_queue, unblock_writes()); } + void expect_register_dispatch(MockExclusiveLockImageCtx &mock_image_ctx) { + EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, register_dispatch(_)); + } + + void expect_shut_down_dispatch(MockExclusiveLockImageCtx &mock_image_ctx) { + EXPECT_CALL(*mock_image_ctx.io_image_dispatcher, shut_down_dispatch(_, _)) + .WillOnce(WithArg<1>(Invoke([](Context* ctx) { + ctx->complete(0); + }))); + } + void expect_prepare_lock_complete(MockExclusiveLockImageCtx &mock_image_ctx) { EXPECT_CALL(*mock_image_ctx.state, handle_prepare_lock_complete()); } @@ -376,7 +429,11 @@ TEST_F(TestMockExclusiveLock, StateTransitions) { InSequence seq; expect_set_state_initializing(exclusive_lock); - expect_block_writes(mock_image_ctx); + MockImageDispatch mock_image_dispatch; + expect_block_writes(mock_image_ctx, mock_image_dispatch); + expect_register_dispatch(mock_image_ctx); + expect_set_require_lock(mock_image_ctx, mock_image_dispatch); + expect_unblock_writes(mock_image_ctx); expect_set_state_unlocked(exclusive_lock); ASSERT_EQ(0, when_init(mock_image_ctx, exclusive_lock)); @@ -389,7 +446,7 @@ TEST_F(TestMockExclusiveLock, StateTransitions) { expect_post_acquire_request(exclusive_lock, try_lock_post_acquire, 0); expect_is_state_acquiring(exclusive_lock, true); expect_notify_acquired_lock(mock_image_ctx); - expect_unblock_writes(mock_image_ctx); 
+ expect_unset_require_lock(mock_image_dispatch); ASSERT_EQ(0, when_post_acquire_lock_handler(exclusive_lock, 0)); // release lock @@ -411,7 +468,7 @@ TEST_F(TestMockExclusiveLock, StateTransitions) { expect_post_acquire_request(exclusive_lock, request_lock_post_acquire, 0); expect_is_state_acquiring(exclusive_lock, true); expect_notify_acquired_lock(mock_image_ctx); - expect_unblock_writes(mock_image_ctx); + expect_unset_require_lock(mock_image_dispatch); ASSERT_EQ(0, when_post_acquire_lock_handler(exclusive_lock, 0)); // shut down (and release) @@ -423,7 +480,7 @@ TEST_F(TestMockExclusiveLock, StateTransitions) { expect_pre_release_request(shutdown_pre_release, 0); ASSERT_EQ(0, when_pre_release_lock_handler(exclusive_lock, true)); - expect_unblock_writes(mock_image_ctx); + expect_shut_down_dispatch(mock_image_ctx); expect_notify_released_lock(mock_image_ctx); ASSERT_EQ(0, when_post_release_lock_handler(exclusive_lock, true, 0)); } @@ -443,7 +500,11 @@ TEST_F(TestMockExclusiveLock, TryLockAlreadyLocked) { } BOOST_SCOPE_EXIT_END expect_set_state_initializing(exclusive_lock); - expect_block_writes(mock_image_ctx); + MockImageDispatch mock_image_dispatch; + expect_block_writes(mock_image_ctx, mock_image_dispatch); + expect_register_dispatch(mock_image_ctx); + expect_set_require_lock(mock_image_ctx, mock_image_dispatch); + expect_unblock_writes(mock_image_ctx); expect_set_state_unlocked(exclusive_lock); ASSERT_EQ(0, when_init(mock_image_ctx, exclusive_lock)); @@ -474,7 +535,11 @@ TEST_F(TestMockExclusiveLock, TryLockError) { InSequence seq; expect_set_state_initializing(exclusive_lock); - expect_block_writes(mock_image_ctx); + MockImageDispatch mock_image_dispatch; + expect_block_writes(mock_image_ctx, mock_image_dispatch); + expect_register_dispatch(mock_image_ctx); + expect_set_require_lock(mock_image_ctx, mock_image_dispatch); + expect_unblock_writes(mock_image_ctx); expect_set_state_unlocked(exclusive_lock); ASSERT_EQ(0, when_init(mock_image_ctx, exclusive_lock)); @@ -505,7 +570,11 @@ TEST_F(TestMockExclusiveLock, AcquireLockAlreadyLocked) { InSequence seq; expect_set_state_initializing(exclusive_lock); - expect_block_writes(mock_image_ctx); + MockImageDispatch mock_image_dispatch; + expect_block_writes(mock_image_ctx, mock_image_dispatch); + expect_register_dispatch(mock_image_ctx); + expect_set_require_lock(mock_image_ctx, mock_image_dispatch); + expect_unblock_writes(mock_image_ctx); expect_set_state_unlocked(exclusive_lock); ASSERT_EQ(0, when_init(mock_image_ctx, exclusive_lock)); @@ -539,7 +608,11 @@ TEST_F(TestMockExclusiveLock, AcquireLockBusy) { InSequence seq; expect_set_state_initializing(exclusive_lock); - expect_block_writes(mock_image_ctx); + MockImageDispatch mock_image_dispatch; + expect_block_writes(mock_image_ctx, mock_image_dispatch); + expect_register_dispatch(mock_image_ctx); + expect_set_require_lock(mock_image_ctx, mock_image_dispatch); + expect_unblock_writes(mock_image_ctx); expect_set_state_unlocked(exclusive_lock); ASSERT_EQ(0, when_init(mock_image_ctx, exclusive_lock)); @@ -573,7 +646,11 @@ TEST_F(TestMockExclusiveLock, AcquireLockError) { InSequence seq; expect_set_state_initializing(exclusive_lock); - expect_block_writes(mock_image_ctx); + MockImageDispatch mock_image_dispatch; + expect_block_writes(mock_image_ctx, mock_image_dispatch); + expect_register_dispatch(mock_image_ctx); + expect_set_require_lock(mock_image_ctx, mock_image_dispatch); + expect_unblock_writes(mock_image_ctx); expect_set_state_unlocked(exclusive_lock); ASSERT_EQ(0, 
when_init(mock_image_ctx, exclusive_lock)); @@ -605,7 +682,11 @@ TEST_F(TestMockExclusiveLock, PostAcquireLockError) { InSequence seq; expect_set_state_initializing(exclusive_lock); - expect_block_writes(mock_image_ctx); + MockImageDispatch mock_image_dispatch; + expect_block_writes(mock_image_ctx, mock_image_dispatch); + expect_register_dispatch(mock_image_ctx); + expect_set_require_lock(mock_image_ctx, mock_image_dispatch); + expect_unblock_writes(mock_image_ctx); expect_set_state_unlocked(exclusive_lock); ASSERT_EQ(0, when_init(mock_image_ctx, exclusive_lock)); @@ -637,7 +718,11 @@ TEST_F(TestMockExclusiveLock, PreReleaseLockError) { InSequence seq; expect_set_state_initializing(exclusive_lock); - expect_block_writes(mock_image_ctx); + MockImageDispatch mock_image_dispatch; + expect_block_writes(mock_image_ctx, mock_image_dispatch); + expect_register_dispatch(mock_image_ctx); + expect_set_require_lock(mock_image_ctx, mock_image_dispatch); + expect_unblock_writes(mock_image_ctx); expect_set_state_unlocked(exclusive_lock); ASSERT_EQ(0, when_init(mock_image_ctx, exclusive_lock)); @@ -667,7 +752,11 @@ TEST_F(TestMockExclusiveLock, ReacquireLock) { InSequence seq; expect_set_state_initializing(exclusive_lock); - expect_block_writes(mock_image_ctx); + MockImageDispatch mock_image_dispatch; + expect_block_writes(mock_image_ctx, mock_image_dispatch); + expect_register_dispatch(mock_image_ctx); + expect_set_require_lock(mock_image_ctx, mock_image_dispatch); + expect_unblock_writes(mock_image_ctx); expect_set_state_unlocked(exclusive_lock); ASSERT_EQ(0, when_init(mock_image_ctx, exclusive_lock)); @@ -680,7 +769,7 @@ TEST_F(TestMockExclusiveLock, ReacquireLock) { expect_post_acquire_request(exclusive_lock, try_lock_post_acquire, 0); expect_is_state_acquiring(exclusive_lock, true); expect_notify_acquired_lock(mock_image_ctx); - expect_unblock_writes(mock_image_ctx); + expect_unset_require_lock(mock_image_dispatch); ASSERT_EQ(0, when_post_acquire_lock_handler(exclusive_lock, 0)); // reacquire lock @@ -696,7 +785,7 @@ TEST_F(TestMockExclusiveLock, ReacquireLock) { expect_pre_release_request(shutdown_pre_release, 0); ASSERT_EQ(0, when_pre_release_lock_handler(exclusive_lock, true)); - expect_unblock_writes(mock_image_ctx); + expect_shut_down_dispatch(mock_image_ctx); expect_notify_released_lock(mock_image_ctx); ASSERT_EQ(0, when_post_release_lock_handler(exclusive_lock, true, 0)); } @@ -720,7 +809,11 @@ TEST_F(TestMockExclusiveLock, BlockRequests) { InSequence seq; expect_set_state_initializing(exclusive_lock); - expect_block_writes(mock_image_ctx); + MockImageDispatch mock_image_dispatch; + expect_block_writes(mock_image_ctx, mock_image_dispatch); + expect_register_dispatch(mock_image_ctx); + expect_set_require_lock(mock_image_ctx, mock_image_dispatch); + expect_unblock_writes(mock_image_ctx); expect_set_state_unlocked(exclusive_lock); ASSERT_EQ(0, when_init(mock_image_ctx, exclusive_lock)); -- 2.39.5
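
The delay-and-restart flow that exclusive_lock::ImageDispatch implements in this
patch — queue the dispatch context of any IO that arrives while the lock is
required, request the lock once, then restart every queued dispatch after the
acquisition attempt completes — can be illustrated outside of the librbd
plumbing. The following is a minimal, self-contained sketch, not librbd code:
Dispatcher, Lock, and the std::function callbacks are hypothetical stand-ins
for ImageDispatch, ExclusiveLock, and the Context/DispatchResult machinery.

#include <functional>
#include <iostream>
#include <list>
#include <mutex>

// Stand-in for ExclusiveLock: an asynchronous acquire that reports its
// result through a callback.
struct Lock {
  void acquire(std::function<void(int)> on_acquired) {
    on_acquired(0);  // pretend acquisition always succeeds
  }
};

class Dispatcher {
public:
  explicit Dispatcher(Lock* lock) : m_lock(lock) {}

  // Returns true if the IO was delayed pending lock acquisition,
  // mirroring needs_exclusive_lock().
  bool dispatch(std::function<void()> on_dispatched) {
    std::unique_lock locker{m_mutex};
    if (!m_require_lock) {
      locker.unlock();
      on_dispatched();  // lock not required: pass through to the next layer
      return false;
    }

    // Queue the IO; only the first waiter triggers an acquire request,
    // the way ImageDispatch batches contexts in m_on_dispatches.
    m_waiting.push_back(std::move(on_dispatched));
    bool first = (m_waiting.size() == 1);
    locker.unlock();

    if (first) {
      m_lock->acquire([this](int r) { handle_acquired(r); });
    }
    return true;
  }

  void set_require_lock(bool required) {
    std::lock_guard locker{m_mutex};
    m_require_lock = required;
  }

private:
  void handle_acquired(int r) {
    std::list<std::function<void()>> waiting;
    {
      std::lock_guard locker{m_mutex};
      if (r == 0) {
        m_require_lock = false;  // lock held: stop delaying IO
      }
      std::swap(waiting, m_waiting);  // drain the queue under the mutex ...
    }
    for (auto& ctx : waiting) {
      ctx();  // ... but restart the delayed dispatches outside of it
    }
  }

  Lock* m_lock;
  std::mutex m_mutex;
  bool m_require_lock = false;
  std::list<std::function<void()>> m_waiting;
};

int main() {
  Lock lock;
  Dispatcher dispatcher(&lock);

  dispatcher.set_require_lock(true);
  dispatcher.dispatch([] { std::cout << "write restarted after acquire\n"; });
  dispatcher.dispatch([] { std::cout << "read passed straight through\n"; });
  return 0;
}

As in handle_acquire_lock() above, the waiter list is swapped out while the
mutex is held and the queued dispatches are completed only after it is
released, so a restarted IO that immediately re-enters the dispatcher cannot
deadlock on m_mutex.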