From: Jason Dillaman
Date: Mon, 9 Jan 2017 01:34:23 +0000 (-0500)
Subject: librbd: merge managed lock refactor
X-Git-Tag: v12.0.0~225^2~2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=1b2b637bb5bb8f07e44ac3ba7a7b1c7842793b1e;p=ceph.git

librbd: merge managed lock refactor

Signed-off-by: Jason Dillaman
---

1b2b637bb5bb8f07e44ac3ba7a7b1c7842793b1e
diff --cc src/librbd/CMakeLists.txt
index 6a10ce78261e,cb72553b2ccd..a3f6167e7b83
--- a/src/librbd/CMakeLists.txt
+++ b/src/librbd/CMakeLists.txt
@@@ -22,19 -22,23 +22,25 @@@ set(librbd_internal_src
    Journal.cc
    LibrbdAdminSocketHook.cc
    LibrbdWriteback.cc
++  ManagedLock.cc
    MirroringWatcher.cc
    ObjectMap.cc
-   ObjectWatcher.cc
    Operations.cc
    Utils.cc
    cache/ImageWriteback.cc
    cache/PassthroughImageCache.cc
-   exclusive_lock/AcquireRequest.cc
+   Watcher.cc
+   watcher/Types.cc
+   watcher/RewatchRequest.cc
+   managed_lock/AcquireRequest.cc
++  managed_lock/BreakRequest.cc
++  managed_lock/GetLockerRequest.cc
+   managed_lock/ReleaseRequest.cc
+   managed_lock/ReacquireRequest.cc
-   ManagedLock.cc
+   exclusive_lock/AutomaticPolicy.cc
-   exclusive_lock/BreakRequest.cc
-   exclusive_lock/GetLockerRequest.cc
-   exclusive_lock/ReacquireRequest.cc
-   exclusive_lock/ReleaseRequest.cc
+   exclusive_lock/PreAcquireRequest.cc
+   exclusive_lock/PostAcquireRequest.cc
+   exclusive_lock/PreReleaseRequest.cc
-   exclusive_lock/AutomaticPolicy.cc
    exclusive_lock/StandardPolicy.cc
    image/CloseRequest.cc
    image/CreateRequest.cc
diff --cc src/librbd/ExclusiveLock.cc
index b8d740a94908,f0eed9c03ee1..06795bff255d
--- a/src/librbd/ExclusiveLock.cc
+++ b/src/librbd/ExclusiveLock.cc
@@@ -132,54 -89,24 +89,26 @@@ void ExclusiveLock<I>::init(uint64_t fe
  template <typename I>
  void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
-   ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
+   ldout(m_image_ctx.cct, 10) << dendl;
  
-   {
-     Mutex::Locker locker(m_lock);
-     assert(!is_shutdown());
-     execute_action(ACTION_SHUT_DOWN, on_shut_down);
-   }
+   ML::shut_down(on_shut_down);
  
    // if stalled in request state machine -- abort
-   handle_peer_notification();
+   handle_peer_notification(0);
  }
  
  template <typename I>
- void ExclusiveLock<I>::try_lock(Context *on_tried_lock) {
-   int r = 0;
-   {
-     Mutex::Locker locker(m_lock);
-     assert(m_image_ctx.owner_lock.is_locked());
-     if (is_shutdown()) {
-       r = -ESHUTDOWN;
-     } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
-       ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
-       execute_action(ACTION_TRY_LOCK, on_tried_lock);
-       return;
-     }
 -void ExclusiveLock<I>::handle_peer_notification() {
++void ExclusiveLock<I>::handle_peer_notification(int r) {
+   Mutex::Locker locker(ML::m_lock);
+   if (ML::m_state != ML::STATE_WAITING_FOR_LOCK) {
+     return;
    }
  
-   on_tried_lock->complete(r);
- }
+   ldout(m_image_ctx.cct, 10) << dendl;
+   assert(ML::get_active_action() == ML::ACTION_ACQUIRE_LOCK);
+ 
- template <typename I>
- void ExclusiveLock<I>::request_lock(Context *on_locked) {
-   int r = 0;
-   {
-     Mutex::Locker locker(m_lock);
-     assert(m_image_ctx.owner_lock.is_locked());
-     if (is_shutdown()) {
-       r = -ESHUTDOWN;
-     } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
-       ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
-       execute_action(ACTION_REQUEST_LOCK, on_locked);
-       return;
-     }
-   }
- 
-   if (on_locked != nullptr) {
-     on_locked->complete(r);
-   }
++  m_acquire_lock_peer_ret_val = r;
+   ML::execute_next_action();
  }
  
  template <typename I>
@@@ -233,183 -132,43 +134,57 @@@ void ExclusiveLock<I>::shutdown_handler
  }
  
  template <typename I>
- void
ExclusiveLock::pre_acquire_lock_handler(Context *on_finish) { ++ int acquire_lock_peer_ret_val = 0; + { - Mutex::Locker locker(m_lock); - if (m_state != STATE_WAITING_FOR_PEER) { - return; - } - - ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl; - assert(get_active_action() == ACTION_REQUEST_LOCK); ++ Mutex::Locker locker(ML::m_lock); ++ std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val); ++ } + - if (r >= 0) { - execute_next_action(); - return; - } ++ if (acquire_lock_peer_ret_val == -EROFS) { ++ ldout(m_image_ctx.cct, 10) << ": peer nacked lock request" << dendl; ++ on_finish->complete(acquire_lock_peer_ret_val); ++ return; + } + - handle_acquire_lock(r); + PreAcquireRequest *req = PreAcquireRequest::create(m_image_ctx, + on_finish); + m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) { + req->send(); + })); } template - std::string ExclusiveLock::encode_lock_cookie() const { - assert(m_lock.is_locked()); + void ExclusiveLock::post_acquire_lock_handler(int r, Context *on_finish) { + ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl; - assert(m_watch_handle != 0); - std::ostringstream ss; - ss << WATCHER_LOCK_COOKIE_PREFIX << " " << m_watch_handle; - return ss.str(); - } -- - template - bool ExclusiveLock::decode_lock_cookie(const std::string &tag, - uint64_t *handle) { - std::string prefix; - std::istringstream ss(tag); - if (!(ss >> prefix >> *handle) || prefix != WATCHER_LOCK_COOKIE_PREFIX) { - return false; - } - return true; - } - if (r < 0) { ++ if (r == -EROFS) { ++ // peer refused to release the exclusive lock ++ on_finish->complete(r); ++ return; ++ } else if (r < 0) { + ML::m_lock.Lock(); + assert(ML::m_state == ML::STATE_ACQUIRING); - template - bool ExclusiveLock::is_transition_state() const { - switch (m_state) { - case STATE_INITIALIZING: - case STATE_ACQUIRING: - case STATE_WAITING_FOR_PEER: - case STATE_WAITING_FOR_REGISTER: - case STATE_POST_ACQUIRING: - case STATE_REACQUIRING: - case STATE_PRE_RELEASING: - case STATE_RELEASING: - case STATE_PRE_SHUTTING_DOWN: - case STATE_SHUTTING_DOWN: - return true; - case STATE_UNINITIALIZED: - case STATE_UNLOCKED: - case STATE_LOCKED: - case STATE_SHUTDOWN: - break; - } - return false; - } + // PostAcquire state machine will not run, so we need complete prepare + m_image_ctx.state->handle_prepare_lock_complete(); - template - void ExclusiveLock::append_context(Action action, Context *ctx) { - assert(m_lock.is_locked()); + typename ML::Action action = ML::get_active_action(); + if (action == ML::ACTION_ACQUIRE_LOCK && r < 0 && r != -EBLACKLISTED) { + ML::m_state = ML::STATE_WAITING_FOR_LOCK; + ML::m_lock.Unlock(); - for (auto &action_ctxs : m_actions_contexts) { - if (action == action_ctxs.first) { - if (ctx != nullptr) { - action_ctxs.second.push_back(ctx); - } + // request the lock from a peer + m_image_ctx.image_watcher->notify_request_lock(); - return; } - } - - Contexts contexts; - if (ctx != nullptr) { - contexts.push_back(ctx); - } - m_actions_contexts.push_back({action, std::move(contexts)}); - } - - template - void ExclusiveLock::execute_action(Action action, Context *ctx) { - assert(m_lock.is_locked()); - - append_context(action, ctx); - if (!is_transition_state()) { - execute_next_action(); - } - } - - template - void ExclusiveLock::execute_next_action() { - assert(m_lock.is_locked()); - assert(!m_actions_contexts.empty()); - switch (get_active_action()) { - case ACTION_TRY_LOCK: - case ACTION_REQUEST_LOCK: - send_acquire_lock(); - break; - case ACTION_REACQUIRE_LOCK: - 
send_reacquire_lock(); - break; - case ACTION_RELEASE_LOCK: - send_release_lock(); - break; - case ACTION_SHUT_DOWN: - send_shutdown(); - break; - default: - assert(false); - break; - } - } - - template - typename ExclusiveLock::Action ExclusiveLock::get_active_action() const { - assert(m_lock.is_locked()); - assert(!m_actions_contexts.empty()); - return m_actions_contexts.front().first; - } - - template - void ExclusiveLock::complete_active_action(State next_state, int r) { - assert(m_lock.is_locked()); - assert(!m_actions_contexts.empty()); - - ActionContexts action_contexts(std::move(m_actions_contexts.front())); - m_actions_contexts.pop_front(); - m_state = next_state; - - m_lock.Unlock(); - for (auto ctx : action_contexts.second) { - ctx->complete(r); - } - m_lock.Lock(); - - if (!is_transition_state() && !m_actions_contexts.empty()) { - execute_next_action(); - } - } - - template - bool ExclusiveLock::is_shutdown() const { - assert(m_lock.is_locked()); - - return ((m_state == STATE_SHUTDOWN) || - (!m_actions_contexts.empty() && - m_actions_contexts.back().first == ACTION_SHUT_DOWN)); - } - - template - void ExclusiveLock::handle_init_complete() { - ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl; - Mutex::Locker locker(m_lock); - m_state = STATE_UNLOCKED; - } - - template - void ExclusiveLock::send_acquire_lock() { - assert(m_lock.is_locked()); - if (m_state == STATE_LOCKED) { - complete_active_action(STATE_LOCKED, 0); - return; - } - - CephContext *cct = m_image_ctx.cct; - ldout(cct, 10) << this << " " << __func__ << dendl; - m_state = STATE_ACQUIRING; + ML::m_lock.Unlock(); + if (r == -EAGAIN) { + r = 0; + } - m_watch_handle = m_image_ctx.image_watcher->get_watch_handle(); - if (m_watch_handle == 0) { - lderr(cct) << "image watcher not registered - delaying request" << dendl; - m_state = STATE_WAITING_FOR_REGISTER; + on_finish->complete(r); return; } diff --cc src/librbd/ExclusiveLock.h index 3048c2bdbcbd,1d2a59fa1caf..c12949c0fb9a --- a/src/librbd/ExclusiveLock.h +++ b/src/librbd/ExclusiveLock.h @@@ -37,15 -26,16 +26,16 @@@ public void init(uint64_t features, Context *on_init); void shut_down(Context *on_shutdown); - void try_lock(Context *on_tried_lock); - void request_lock(Context *on_locked); - void release_lock(Context *on_released); - - void reacquire_lock(Context *on_reacquired = nullptr); - - void handle_peer_notification(); + void handle_peer_notification(int r); - static bool decode_lock_cookie(const std::string &cookie, uint64_t *handle); + protected: + virtual void shutdown_handler(int r, Context *on_finish); + virtual void pre_acquire_lock_handler(Context *on_finish); + virtual void post_acquire_lock_handler(int r, Context *on_finish); + virtual void pre_release_lock_handler(bool shutting_down, + Context *on_finish); + virtual void post_release_lock_handler(bool shutting_down, int r, + Context *on_finish); private: @@@ -152,38 -98,10 +98,12 @@@ uint32_t m_request_blocked_count = 0; int m_request_blocked_ret_val = 0; - std::string encode_lock_cookie() const; - - bool is_transition_state() const; - - void append_context(Action action, Context *ctx); - void execute_action(Action action, Context *ctx); - void execute_next_action(); - - Action get_active_action() const; - void complete_active_action(State next_state, int r); - - bool is_shutdown() const; ++ int m_acquire_lock_peer_ret_val = 0; + void handle_init_complete(); - - void send_acquire_lock(); - void handle_acquiring_lock(int r); - void handle_acquire_lock(int r); - - void send_reacquire_lock(); - 
void handle_reacquire_lock(int r); - - void send_release_lock(); - void handle_releasing_lock(int r); - void handle_release_lock(int r); - - void send_shutdown(); - void send_shutdown_release(); - void handle_shutdown_releasing(int r); - void handle_shutdown_released(int r); - void handle_shutdown(int r); - void complete_shutdown(int r); + void handle_post_acquiring_lock(int r); + void handle_post_acquired_lock(int r); + void handle_pre_releasing_lock(int r); }; } // namespace librbd diff --cc src/librbd/ManagedLock.cc index 000000000000,0447cc24b482..17a0fb81142e mode 000000,100644..100644 --- a/src/librbd/ManagedLock.cc +++ b/src/librbd/ManagedLock.cc @@@ -1,0 -1,680 +1,672 @@@ + // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- + // vim: ts=8 sw=2 smarttab + + #include "librbd/ManagedLock.h" + #include "librbd/managed_lock/AcquireRequest.h" + #include "librbd/managed_lock/ReleaseRequest.h" + #include "librbd/managed_lock/ReacquireRequest.h" + #include "librbd/Watcher.h" + #include "librbd/ImageCtx.h" + #include "cls/lock/cls_lock_client.h" + #include "common/dout.h" + #include "common/errno.h" + #include "common/WorkQueue.h" + #include "librbd/Utils.h" + #include + + #define dout_subsys ceph_subsys_rbd + #undef dout_prefix + #define dout_prefix *_dout << "librbd::ManagedLock: " << this << " " << __func__ + + namespace librbd { + + using std::string; + + namespace { + + const std::string WATCHER_LOCK_COOKIE_PREFIX = "auto"; + + template + struct C_SendLockRequest : public Context { + R* request; + explicit C_SendLockRequest(R* request) : request(request) { + } + virtual void finish(int r) override { + request->send(); + } + }; + + } // anonymous namespace + + template + const std::string ManagedLock::WATCHER_LOCK_TAG("internal"); + + template + ManagedLock::ManagedLock(librados::IoCtx &ioctx, ContextWQ *work_queue, + const string& oid, Watcher *watcher) + : m_lock(util::unique_lock_name("librbd::ManagedLock::m_lock", this)), + m_state(STATE_UNLOCKED), + m_ioctx(ioctx), m_cct(reinterpret_cast(ioctx.cct())), + m_work_queue(work_queue), + m_oid(oid), + m_watcher(watcher) { + } + + template + ManagedLock::~ManagedLock() { + Mutex::Locker locker(m_lock); + assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED || + m_state == STATE_UNINITIALIZED); + } + + template + bool ManagedLock::is_lock_owner() const { + Mutex::Locker locker(m_lock); + + bool lock_owner; + + switch (m_state) { + case STATE_LOCKED: + case STATE_REACQUIRING: + case STATE_PRE_SHUTTING_DOWN: + case STATE_POST_ACQUIRING: + case STATE_PRE_RELEASING: + lock_owner = true; + break; + default: + lock_owner = false; + break; + } + + ldout(m_cct, 20) << "=" << lock_owner << dendl; + return lock_owner; + } + + template + void ManagedLock::shut_down(Context *on_shut_down) { + ldout(m_cct, 10) << dendl; + + Mutex::Locker locker(m_lock); + assert(!is_shutdown_locked()); + execute_action(ACTION_SHUT_DOWN, on_shut_down); + } + + template + void ManagedLock::acquire_lock(Context *on_acquired) { + int r = 0; + { + Mutex::Locker locker(m_lock); + if (is_shutdown_locked()) { + r = -ESHUTDOWN; + } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) { + ldout(m_cct, 10) << dendl; + execute_action(ACTION_ACQUIRE_LOCK, on_acquired); + return; + } + } + + on_acquired->complete(r); + } + + template + void ManagedLock::try_acquire_lock(Context *on_acquired) { + int r = 0; + { + Mutex::Locker locker(m_lock); + if (is_shutdown_locked()) { + r = -ESHUTDOWN; + } else if (m_state != STATE_LOCKED || 
!m_actions_contexts.empty()) { + ldout(m_cct, 10) << dendl; + execute_action(ACTION_TRY_LOCK, on_acquired); + return; + } + } + + on_acquired->complete(r); + } + + template + void ManagedLock::release_lock(Context *on_released) { + int r = 0; + { + Mutex::Locker locker(m_lock); + if (is_shutdown_locked()) { + r = -ESHUTDOWN; + } else if (m_state != STATE_UNLOCKED || !m_actions_contexts.empty()) { + ldout(m_cct, 10) << dendl; + execute_action(ACTION_RELEASE_LOCK, on_released); + return; + } + } + + on_released->complete(r); + } + + template + void ManagedLock::reacquire_lock(Context *on_reacquired) { + { + Mutex::Locker locker(m_lock); + + if (m_state == STATE_WAITING_FOR_REGISTER) { + // restart the acquire lock process now that watch is valid + ldout(m_cct, 10) << ": " << "woke up waiting acquire" << dendl; + Action active_action = get_active_action(); + assert(active_action == ACTION_TRY_LOCK || + active_action == ACTION_ACQUIRE_LOCK); + execute_next_action(); + } else if (!is_shutdown_locked() && + (m_state == STATE_LOCKED || + m_state == STATE_ACQUIRING || + m_state == STATE_POST_ACQUIRING || + m_state == STATE_WAITING_FOR_LOCK)) { + // interlock the lock operation with other state ops + ldout(m_cct, 10) << dendl; + execute_action(ACTION_REACQUIRE_LOCK, on_reacquired); + return; + } + } + + // ignore request if shutdown or not in a locked-related state + if (on_reacquired != nullptr) { + on_reacquired->complete(0); + } + } + + template + void ManagedLock::shutdown_handler(int r, Context *on_finish) { + on_finish->complete(r); + } + + template + void ManagedLock::pre_acquire_lock_handler(Context *on_finish) { + on_finish->complete(0); + } + + template + void ManagedLock::post_acquire_lock_handler(int r, Context *on_finish) { + on_finish->complete(r); + } + + template + void ManagedLock::pre_release_lock_handler(bool shutting_down, + Context *on_finish) { + { + Mutex::Locker locker(m_lock); + m_state = shutting_down ? 
STATE_SHUTTING_DOWN : STATE_RELEASING; + } + on_finish->complete(0); + } + + template + void ManagedLock::post_release_lock_handler(bool shutting_down, int r, + Context *on_finish) { + on_finish->complete(r); + } + -template -void ManagedLock::assert_locked(librados::ObjectWriteOperation *op, - ClsLockType type) { - Mutex::Locker locker(m_lock); - rados::cls::lock::assert_locked(op, RBD_LOCK_NAME, type, m_cookie, - WATCHER_LOCK_TAG); -} - + template + bool ManagedLock::decode_lock_cookie(const std::string &tag, + uint64_t *handle) { + std::string prefix; + std::istringstream ss(tag); + if (!(ss >> prefix >> *handle) || prefix != WATCHER_LOCK_COOKIE_PREFIX) { + return false; + } + return true; + } + + template + string ManagedLock::encode_lock_cookie(uint64_t watch_handle) { + assert(watch_handle != 0); + std::ostringstream ss; + ss << WATCHER_LOCK_COOKIE_PREFIX << " " << watch_handle; + return ss.str(); + } + + template + bool ManagedLock::is_transition_state() const { + switch (m_state) { + case STATE_ACQUIRING: + case STATE_WAITING_FOR_REGISTER: + case STATE_REACQUIRING: + case STATE_RELEASING: + case STATE_PRE_SHUTTING_DOWN: + case STATE_SHUTTING_DOWN: + case STATE_INITIALIZING: + case STATE_WAITING_FOR_LOCK: + case STATE_POST_ACQUIRING: + case STATE_PRE_RELEASING: + return true; + case STATE_UNLOCKED: + case STATE_LOCKED: + case STATE_SHUTDOWN: + case STATE_UNINITIALIZED: + break; + } + return false; + } + + template + void ManagedLock::append_context(Action action, Context *ctx) { + assert(m_lock.is_locked()); + + for (auto &action_ctxs : m_actions_contexts) { + if (action == action_ctxs.first) { + if (ctx != nullptr) { + action_ctxs.second.push_back(ctx); + } + return; + } + } + + Contexts contexts; + if (ctx != nullptr) { + contexts.push_back(ctx); + } + m_actions_contexts.push_back({action, std::move(contexts)}); + } + + template + void ManagedLock::execute_action(Action action, Context *ctx) { + assert(m_lock.is_locked()); + + append_context(action, ctx); + if (!is_transition_state()) { + execute_next_action(); + } + } + + template + void ManagedLock::execute_next_action() { + assert(m_lock.is_locked()); + assert(!m_actions_contexts.empty()); + switch (get_active_action()) { + case ACTION_ACQUIRE_LOCK: + case ACTION_TRY_LOCK: + send_acquire_lock(); + break; + case ACTION_REACQUIRE_LOCK: + send_reacquire_lock(); + break; + case ACTION_RELEASE_LOCK: + send_release_lock(); + break; + case ACTION_SHUT_DOWN: + send_shutdown(); + break; + default: + assert(false); + break; + } + } + + template + typename ManagedLock::Action ManagedLock::get_active_action() const { + assert(m_lock.is_locked()); + assert(!m_actions_contexts.empty()); + return m_actions_contexts.front().first; + } + + template + void ManagedLock::complete_active_action(State next_state, int r) { + assert(m_lock.is_locked()); + assert(!m_actions_contexts.empty()); + + ActionContexts action_contexts(std::move(m_actions_contexts.front())); + m_actions_contexts.pop_front(); + m_state = next_state; + + m_lock.Unlock(); + for (auto ctx : action_contexts.second) { + ctx->complete(r); + } + m_lock.Lock(); + + if (!is_transition_state() && !m_actions_contexts.empty()) { + execute_next_action(); + } + } + + template + bool ManagedLock::is_shutdown_locked() const { + assert(m_lock.is_locked()); + + return ((m_state == STATE_SHUTDOWN) || + (!m_actions_contexts.empty() && + m_actions_contexts.back().first == ACTION_SHUT_DOWN)); + } + + template + void ManagedLock::send_acquire_lock() { + assert(m_lock.is_locked()); + if (m_state == 
STATE_LOCKED) { + complete_active_action(STATE_LOCKED, 0); + return; + } + + ldout(m_cct, 10) << dendl; + m_state = STATE_ACQUIRING; + + uint64_t watch_handle = m_watcher->get_watch_handle(); + if (watch_handle == 0) { + lderr(m_cct) << "watcher not registered - delaying request" << dendl; + m_state = STATE_WAITING_FOR_REGISTER; + return; + } + m_cookie = ManagedLock::encode_lock_cookie(watch_handle); + + m_work_queue->queue(new FunctionContext([this](int r) { + pre_acquire_lock_handler(util::create_context_callback< + ManagedLock, &ManagedLock::handle_pre_acquire_lock>(this)); + })); + } + + template + void ManagedLock::handle_pre_acquire_lock(int r) { + ldout(m_cct, 10) << ": r=" << r << dendl; + + if (r < 0) { + handle_acquire_lock(r); + return; + } + + using managed_lock::AcquireRequest; + AcquireRequest* req = AcquireRequest::create(m_ioctx, m_watcher, + m_work_queue, m_oid, m_cookie, + util::create_context_callback< + ManagedLock, &ManagedLock::handle_acquire_lock>(this)); + m_work_queue->queue(new C_SendLockRequest>(req), 0); + } + + template + void ManagedLock::handle_acquire_lock(int r) { + ldout(m_cct, 10) << ": r=" << r << dendl; + + if (r == -EBUSY || r == -EAGAIN) { + ldout(m_cct, 5) << ": unable to acquire exclusive lock" << dendl; + } else if (r < 0) { + lderr(m_cct) << ": failed to acquire exclusive lock:" << cpp_strerror(r) + << dendl; + } else { + ldout(m_cct, 5) << ": successfully acquired exclusive lock" << dendl; + } + + m_post_next_state = (r < 0 ? STATE_UNLOCKED : STATE_LOCKED); + + m_work_queue->queue(new FunctionContext([this, r](int ret) { + post_acquire_lock_handler(r, util::create_context_callback< + ManagedLock, &ManagedLock::handle_post_acquire_lock>(this)); + })); + } + + template + void ManagedLock::handle_post_acquire_lock(int r) { + ldout(m_cct, 10) << ": r=" << r << dendl; + + Mutex::Locker locker(m_lock); + + if (r < 0 && m_post_next_state == STATE_LOCKED) { + // release_lock without calling pre and post handlers + revert_to_unlock_state(r); + } else { + complete_active_action(m_post_next_state, r); + } + } + + template + void ManagedLock::revert_to_unlock_state(int r) { + ldout(m_cct, 10) << ": r=" << r << dendl; + + using managed_lock::ReleaseRequest; + ReleaseRequest* req = ReleaseRequest::create(m_ioctx, m_watcher, + m_work_queue, m_oid, m_cookie, + new FunctionContext([this, r](int ret) { + Mutex::Locker locker(m_lock); + assert(ret == 0); + complete_active_action(STATE_UNLOCKED, r); + })); + m_work_queue->queue(new C_SendLockRequest>(req)); + } + + template + void ManagedLock::send_reacquire_lock() { + assert(m_lock.is_locked()); + + if (m_state != STATE_LOCKED) { + complete_active_action(m_state, 0); + return; + } + + uint64_t watch_handle = m_watcher->get_watch_handle(); + if (watch_handle == 0) { + // watch (re)failed while recovering + lderr(m_cct) << ": aborting reacquire due to invalid watch handle" + << dendl; + complete_active_action(STATE_LOCKED, 0); + return; + } + + m_new_cookie = ManagedLock::encode_lock_cookie(watch_handle); + if (m_cookie == m_new_cookie) { + ldout(m_cct, 10) << ": skipping reacquire since cookie still valid" + << dendl; + complete_active_action(STATE_LOCKED, 0); + return; + } + + ldout(m_cct, 10) << dendl; + m_state = STATE_REACQUIRING; + + using managed_lock::ReacquireRequest; + ReacquireRequest* req = ReacquireRequest::create(m_ioctx, m_oid, + m_cookie, m_new_cookie, + util::create_context_callback< + ManagedLock, &ManagedLock::handle_reacquire_lock>(this)); + m_work_queue->queue(new C_SendLockRequest>(req)); + } + 
+ template + void ManagedLock::handle_reacquire_lock(int r) { + ldout(m_cct, 10) << ": r=" << r << dendl; + + Mutex::Locker locker(m_lock); + assert(m_state == STATE_REACQUIRING); + + if (r < 0) { + if (r == -EOPNOTSUPP) { + ldout(m_cct, 10) << ": updating lock is not supported" << dendl; + } else { + lderr(m_cct) << ": failed to update lock cookie: " << cpp_strerror(r) + << dendl; + } + + if (!is_shutdown_locked()) { + // queue a release and re-acquire of the lock since cookie cannot + // be updated on older OSDs + execute_action(ACTION_RELEASE_LOCK, nullptr); + + assert(!m_actions_contexts.empty()); + ActionContexts &action_contexts(m_actions_contexts.front()); + + // reacquire completes when the request lock completes + Contexts contexts; + std::swap(contexts, action_contexts.second); + if (contexts.empty()) { + execute_action(ACTION_ACQUIRE_LOCK, nullptr); + } else { + for (auto ctx : contexts) { + ctx = new FunctionContext([ctx, r](int acquire_ret_val) { + if (acquire_ret_val >= 0) { + acquire_ret_val = r; + } + ctx->complete(acquire_ret_val); + }); + execute_action(ACTION_ACQUIRE_LOCK, ctx); + } + } + } + } else { + m_cookie = m_new_cookie; + } + + complete_active_action(STATE_LOCKED, r); + } + + template + void ManagedLock::send_release_lock() { + assert(m_lock.is_locked()); + if (m_state == STATE_UNLOCKED) { + complete_active_action(STATE_UNLOCKED, 0); + return; + } + + ldout(m_cct, 10) << dendl; + m_state = STATE_PRE_RELEASING; + + m_work_queue->queue(new FunctionContext([this](int r) { + pre_release_lock_handler(false, util::create_context_callback< + ManagedLock, &ManagedLock::handle_pre_release_lock>(this)); + })); + } + + template + void ManagedLock::handle_pre_release_lock(int r) { + ldout(m_cct, 10) << ": r=" << r << dendl; + + if (r < 0) { + handle_release_lock(r); + return; + } + + using managed_lock::ReleaseRequest; + ReleaseRequest* req = ReleaseRequest::create(m_ioctx, m_watcher, + m_work_queue, m_oid, m_cookie, + util::create_context_callback< + ManagedLock, &ManagedLock::handle_release_lock>(this)); + m_work_queue->queue(new C_SendLockRequest>(req), 0); + } + + template + void ManagedLock::handle_release_lock(int r) { + ldout(m_cct, 10) << ": r=" << r << dendl; + + Mutex::Locker locker(m_lock); + assert(m_state == STATE_RELEASING); + + if (r >= 0) { + m_cookie = ""; + } + + m_post_next_state = r < 0 ? 
STATE_LOCKED : STATE_UNLOCKED; + + m_work_queue->queue(new FunctionContext([this, r](int ret) { + post_release_lock_handler(false, r, util::create_context_callback< + ManagedLock, &ManagedLock::handle_post_release_lock>(this)); + })); + } + + template + void ManagedLock::handle_post_release_lock(int r) { + ldout(m_cct, 10) << ": r=" << r << dendl; + + Mutex::Locker locker(m_lock); + complete_active_action(m_post_next_state, r); + } + + template + void ManagedLock::send_shutdown() { + ldout(m_cct, 10) << dendl; + assert(m_lock.is_locked()); + if (m_state == STATE_UNLOCKED) { + m_state = STATE_SHUTTING_DOWN; + m_work_queue->queue(new FunctionContext([this](int r) { + shutdown_handler(r, util::create_context_callback< + ManagedLock, &ManagedLock::handle_shutdown>(this)); + })); + return; + } + + ldout(m_cct, 10) << dendl; + assert(m_state == STATE_LOCKED); + m_state = STATE_PRE_SHUTTING_DOWN; + + m_lock.Unlock(); + m_work_queue->queue(new C_ShutDownRelease(this), 0); + m_lock.Lock(); + } + + template + void ManagedLock::handle_shutdown(int r) { + ldout(m_cct, 10) << ": r=" << r << dendl; + + complete_shutdown(r); + } + + template + void ManagedLock::send_shutdown_release() { + ldout(m_cct, 10) << dendl; + + Mutex::Locker locker(m_lock); + + m_work_queue->queue(new FunctionContext([this](int r) { + pre_release_lock_handler(true, util::create_context_callback< + ManagedLock, &ManagedLock::handle_shutdown_pre_release>(this)); + })); + } + + template + void ManagedLock::handle_shutdown_pre_release(int r) { + ldout(m_cct, 10) << ": r=" << r << dendl; + + std::string cookie; + { + Mutex::Locker locker(m_lock); + cookie = m_cookie; + } + + using managed_lock::ReleaseRequest; + ReleaseRequest* req = ReleaseRequest::create(m_ioctx, m_watcher, + m_work_queue, m_oid, cookie, + new FunctionContext([this](int r) { + post_release_lock_handler(true, r, util::create_context_callback< + ManagedLock, &ManagedLock::handle_shutdown_post_release>(this)); + })); + req->send(); + + } + + template + void ManagedLock::handle_shutdown_post_release(int r) { + ldout(m_cct, 10) << ": r=" << r << dendl; + + complete_shutdown(r); + } + + template + void ManagedLock::complete_shutdown(int r) { + ldout(m_cct, 10) << ": r=" << r << dendl; + + if (r < 0) { + lderr(m_cct) << "failed to shut down lock: " << cpp_strerror(r) + << dendl; + } + + ActionContexts action_contexts; + { + Mutex::Locker locker(m_lock); + assert(m_lock.is_locked()); + assert(m_actions_contexts.size() == 1); + + action_contexts = std::move(m_actions_contexts.front()); + m_actions_contexts.pop_front(); + m_state = STATE_SHUTDOWN; + } + + // expect to be destroyed after firing callback + for (auto ctx : action_contexts.second) { + ctx->complete(r); + } + } + + } // namespace librbd + + template class librbd::ManagedLock; diff --cc src/librbd/ManagedLock.h index 000000000000,693ce623af78..dccb8b7f0577 mode 000000,100644..100644 --- a/src/librbd/ManagedLock.h +++ b/src/librbd/ManagedLock.h @@@ -1,0 -1,196 +1,198 @@@ + // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- + // vim: ts=8 sw=2 smarttab + + #ifndef CEPH_LIBRBD_MANAGED_LOCK_H + #define CEPH_LIBRBD_MANAGED_LOCK_H + + #include "include/int_types.h" + #include "include/Context.h" + #include "include/rados/librados.hpp" + #include "cls/lock/cls_lock_types.h" + #include "librbd/watcher/Types.h" + #include "common/Mutex.h" + #include + #include + #include + + class ContextWQ; + + namespace librbd { + -template ++struct ImageCtx; ++ ++template + class ManagedLock { + private: + typedef 
watcher::Traits TypeTraits; + typedef typename TypeTraits::Watcher Watcher; + + public: + static const std::string WATCHER_LOCK_TAG; + + static ManagedLock *create(librados::IoCtx& ioctx, ContextWQ *work_queue, + const std::string& oid, Watcher *watcher) { + return new ManagedLock(ioctx, work_queue, oid, watcher); + } + + ManagedLock(librados::IoCtx& ioctx, ContextWQ *work_queue, + const std::string& oid, Watcher *watcher); + virtual ~ManagedLock(); + + bool is_lock_owner() const; + + void shut_down(Context *on_shutdown); + void acquire_lock(Context *on_acquired); + void try_acquire_lock(Context *on_acquired); + void release_lock(Context *on_released); + void reacquire_lock(Context *on_reacquired = nullptr); + - void assert_locked(librados::ObjectWriteOperation *op, ClsLockType type); - + bool is_shutdown() const { + Mutex::Locker l(m_lock); + return is_shutdown_locked(); + } + + bool is_locked_state() const { + return m_state == STATE_LOCKED; + } + + static bool decode_lock_cookie(const std::string &tag, uint64_t *handle); + + protected: + + /** + * @verbatim + * + * + * | + * | + * v (acquire_lock) + * UNLOCKED -----------------------------------------> ACQUIRING + * ^ | + * | | + * RELEASING | + * | | + * | | + * | (release_lock) v + * PRE_RELEASING <----------------------------------------- LOCKED + * + * + * | + * v + * REACQUIRING -------------------------------------> + * . ^ + * . | + * . . . > ---> ---/ + * + * + * | + * | + * v + * PRE_SHUTTING_DOWN ---> SHUTTING_DOWN ---> SHUTDOWN ---> + * + * @endverbatim + */ + enum State { + STATE_UNINITIALIZED, + STATE_INITIALIZING, + STATE_UNLOCKED, + STATE_LOCKED, + STATE_ACQUIRING, + STATE_POST_ACQUIRING, + STATE_WAITING_FOR_REGISTER, + STATE_WAITING_FOR_LOCK, + STATE_REACQUIRING, + STATE_PRE_RELEASING, + STATE_RELEASING, + STATE_PRE_SHUTTING_DOWN, + STATE_SHUTTING_DOWN, + STATE_SHUTDOWN, + }; + + enum Action { + ACTION_TRY_LOCK, + ACTION_ACQUIRE_LOCK, + ACTION_REACQUIRE_LOCK, + ACTION_RELEASE_LOCK, + ACTION_SHUT_DOWN + }; + + mutable Mutex m_lock; + State m_state; + + virtual void shutdown_handler(int r, Context *on_finish); + virtual void pre_acquire_lock_handler(Context *on_finish); + virtual void post_acquire_lock_handler(int r, Context *on_finish); + virtual void pre_release_lock_handler(bool shutting_down, + Context *on_finish); + virtual void post_release_lock_handler(bool shutting_down, int r, + Context *on_finish); + + Action get_active_action() const; + bool is_shutdown_locked() const; + void execute_next_action(); + + private: + typedef std::list Contexts; + typedef std::pair ActionContexts; + typedef std::list ActionsContexts; + + struct C_ShutDownRelease : public Context { + ManagedLock *lock; + C_ShutDownRelease(ManagedLock *lock) + : lock(lock) { + } + virtual void finish(int r) override { + lock->send_shutdown_release(); + } + }; + + librados::IoCtx& m_ioctx; + CephContext *m_cct; + ContextWQ *m_work_queue; + std::string m_oid; + Watcher *m_watcher; + + std::string m_cookie; + std::string m_new_cookie; + + State m_post_next_state; + + ActionsContexts m_actions_contexts; + + static std::string encode_lock_cookie(uint64_t watch_handle); + + bool is_transition_state() const; + + void append_context(Action action, Context *ctx); + void execute_action(Action action, Context *ctx); + + void complete_active_action(State next_state, int r); + + + void send_acquire_lock(); + void handle_pre_acquire_lock(int r); + void handle_acquire_lock(int r); + void handle_post_acquire_lock(int r); + void revert_to_unlock_state(int r); + + void 
send_reacquire_lock(); + void handle_reacquire_lock(int r); + + void send_release_lock(); + void handle_pre_release_lock(int r); + void handle_release_lock(int r); + void handle_post_release_lock(int r); + + void send_shutdown(); + void handle_shutdown(int r); + void send_shutdown_release(); + void handle_shutdown_pre_release(int r); + void handle_shutdown_post_release(int r); + void complete_shutdown(int r); + }; + + } // namespace librbd + ++extern template class librbd::ManagedLock; ++ + #endif // CEPH_LIBRBD_MANAGED_LOCK_H diff --cc src/librbd/exclusive_lock/PreReleaseRequest.cc index 000000000000,c03838f3c680..7fecb8180492 mode 000000,100644..100644 --- a/src/librbd/exclusive_lock/PreReleaseRequest.cc +++ b/src/librbd/exclusive_lock/PreReleaseRequest.cc @@@ -1,0 -1,247 +1,295 @@@ + // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- + // vim: ts=8 sw=2 smarttab + + #include "librbd/exclusive_lock/PreReleaseRequest.h" + #include "common/dout.h" + #include "common/errno.h" + #include "librbd/AioImageRequestWQ.h" + #include "librbd/ExclusiveLock.h" + #include "librbd/ManagedLock.h" + #include "librbd/ImageState.h" + #include "librbd/ImageWatcher.h" + #include "librbd/Journal.h" + #include "librbd/ObjectMap.h" + #include "librbd/Utils.h" + + #define dout_subsys ceph_subsys_rbd + #undef dout_prefix + #define dout_prefix *_dout << "librbd::exclusive_lock::PreReleaseRequest: " + + namespace librbd { + namespace exclusive_lock { + + using util::create_async_context_callback; + using util::create_context_callback; + + template + PreReleaseRequest* PreReleaseRequest::create(I &image_ctx, + Context *on_releasing, + Context *on_finish, + bool shutting_down) { + return new PreReleaseRequest(image_ctx, on_releasing, on_finish, + shutting_down); + } + + template + PreReleaseRequest::PreReleaseRequest(I &image_ctx, Context *on_releasing, + Context *on_finish, bool shutting_down) + : m_image_ctx(image_ctx), m_on_releasing(on_releasing), + m_on_finish(create_async_context_callback(image_ctx, on_finish)), + m_shutting_down(shutting_down), m_error_result(0), m_object_map(nullptr), + m_journal(nullptr) { + } + + template + PreReleaseRequest::~PreReleaseRequest() { + if (!m_shutting_down) { + m_image_ctx.state->handle_prepare_lock_complete(); + } + delete m_on_releasing; + } + + template + void PreReleaseRequest::send() { + send_prepare_lock(); + } + + template + void PreReleaseRequest::send_prepare_lock() { + if (m_shutting_down) { + send_cancel_op_requests(); + return; + } + + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << __func__ << dendl; + + // release the lock if the image is not busy performing other actions + Context *ctx = create_context_callback< + PreReleaseRequest, &PreReleaseRequest::handle_prepare_lock>(this); + m_image_ctx.state->prepare_lock(ctx); + } + + template + void PreReleaseRequest::handle_prepare_lock(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << __func__ << ": r=" << r << dendl; + + send_cancel_op_requests(); + } + + template + void PreReleaseRequest::send_cancel_op_requests() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << __func__ << dendl; + + using klass = PreReleaseRequest; + Context *ctx = create_context_callback< + klass, &klass::handle_cancel_op_requests>(this); + m_image_ctx.cancel_async_requests(ctx); + } + + template + void PreReleaseRequest::handle_cancel_op_requests(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << __func__ << ": r=" << r << dendl; + + assert(r == 0); + + 
send_block_writes(); + } + + template + void PreReleaseRequest::send_block_writes() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << __func__ << dendl; + + using klass = PreReleaseRequest; + Context *ctx = create_context_callback< + klass, &klass::handle_block_writes>(this); + + { + RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) { + m_image_ctx.aio_work_queue->set_require_lock_on_read(); + } + m_image_ctx.aio_work_queue->block_writes(ctx); + } + } + + template + void PreReleaseRequest::handle_block_writes(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << __func__ << ": r=" << r << dendl; + - save_result(r); - if (r < 0) { ++ if (r == -EBLACKLISTED) { ++ // allow clean shut down if blacklisted ++ lderr(cct) << "failed to block writes because client is blacklisted" ++ << dendl; ++ } else if (r < 0) { ++ lderr(cct) << "failed to block writes: " << cpp_strerror(r) << dendl; + m_image_ctx.aio_work_queue->unblock_writes(); ++ save_result(r); + finish(); + return; + } + - send_image_flush_notifies(); ++ send_invalidate_cache(false); + } + + template -void PreReleaseRequest::send_image_flush_notifies() { ++void PreReleaseRequest::send_invalidate_cache(bool purge_on_error) { ++ if (m_image_ctx.object_cacher == nullptr) { ++ send_flush_notifies(); ++ return; ++ } ++ ++ CephContext *cct = m_image_ctx.cct; ++ ldout(cct, 10) << __func__ << ": purge_on_error=" << purge_on_error << dendl; ++ ++ RWLock::RLocker owner_lock(m_image_ctx.owner_lock); ++ Context *ctx = create_async_context_callback( ++ m_image_ctx, create_context_callback< ++ PreReleaseRequest, ++ &PreReleaseRequest::handle_invalidate_cache>(this)); ++ m_image_ctx.invalidate_cache(purge_on_error, ctx); ++} ++ ++template ++void PreReleaseRequest::handle_invalidate_cache(int r) { ++ CephContext *cct = m_image_ctx.cct; ++ ldout(cct, 10) << __func__ << ": r=" << r << dendl; ++ ++ if (r == -EBLACKLISTED) { ++ lderr(cct) << "failed to invalidate cache because client is blacklisted" ++ << dendl; ++ if (!m_image_ctx.is_cache_empty()) { ++ // force purge the cache after after being blacklisted ++ send_invalidate_cache(true); ++ return; ++ } ++ } else if (r < 0 && r != -EBUSY) { ++ lderr(cct) << "failed to invalidate cache: " << cpp_strerror(r) ++ << dendl; ++ m_image_ctx.aio_work_queue->unblock_writes(); ++ save_result(r); ++ finish(); ++ return; ++ } ++ ++ send_flush_notifies(); ++} ++ ++template ++void PreReleaseRequest::send_flush_notifies() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << __func__ << dendl; + + using klass = PreReleaseRequest; + Context *ctx = - create_context_callback(this); ++ create_context_callback(this); + m_image_ctx.image_watcher->flush(ctx); + } + + template -void PreReleaseRequest::handle_image_flush_notifies(int r) { ++void PreReleaseRequest::handle_flush_notifies(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << __func__ << dendl; + + assert(r == 0); + send_close_journal(); + } + + template + void PreReleaseRequest::send_close_journal() { + { + RWLock::WLocker snap_locker(m_image_ctx.snap_lock); + std::swap(m_journal, m_image_ctx.journal); + } + + if (m_journal == nullptr) { + send_close_object_map(); + return; + } + + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << __func__ << dendl; + + using klass = PreReleaseRequest; + Context *ctx = create_context_callback( + this); + m_journal->close(ctx); + } + + template + void PreReleaseRequest::handle_close_journal(int r) { + CephContext *cct = 
m_image_ctx.cct; + ldout(cct, 10) << __func__ << ": r=" << r << dendl; + + if (r < 0) { + // error implies some journal events were not flushed -- continue + lderr(cct) << "failed to close journal: " << cpp_strerror(r) << dendl; + } + + delete m_journal; + + send_close_object_map(); + } + + template + void PreReleaseRequest::send_close_object_map() { + { + RWLock::WLocker snap_locker(m_image_ctx.snap_lock); + std::swap(m_object_map, m_image_ctx.object_map); + } + + if (m_object_map == nullptr) { + send_unlock(); + return; + } + + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << __func__ << dendl; + + using klass = PreReleaseRequest; + Context *ctx = create_context_callback< + klass, &klass::handle_close_object_map>(this); + m_object_map->close(ctx); + } + + template + void PreReleaseRequest::handle_close_object_map(int r) { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << __func__ << ": r=" << r << dendl; + + // object map shouldn't return errors + assert(r == 0); + delete m_object_map; + + send_unlock(); + } + + template + void PreReleaseRequest::send_unlock() { + CephContext *cct = m_image_ctx.cct; + ldout(cct, 10) << __func__ << dendl; + + if (m_on_releasing != nullptr) { + // alert caller that we no longer own the exclusive lock + m_on_releasing->complete(0); + m_on_releasing = nullptr; + } + + finish(); + } + + template + void PreReleaseRequest::finish() { + m_on_finish->complete(m_error_result); + delete this; + } + + } // namespace exclusive_lock + } // namespace librbd + + template class librbd::exclusive_lock::PreReleaseRequest; diff --cc src/librbd/exclusive_lock/PreReleaseRequest.h index 000000000000,b51a5b7cd16f..15f40b922545 mode 000000,100644..100644 --- a/src/librbd/exclusive_lock/PreReleaseRequest.h +++ b/src/librbd/exclusive_lock/PreReleaseRequest.h @@@ -1,0 -1,105 +1,111 @@@ + // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- + // vim: ts=8 sw=2 smarttab + + #ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H + #define CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H + + #include "librbd/ImageCtx.h" + #include + + class Context; + + namespace librbd { + + struct ImageCtx; + template class ManagedLock; + template class Journal; + + namespace exclusive_lock { + + template + class PreReleaseRequest { + public: + static PreReleaseRequest* create(ImageCtxT &image_ctx, Context *on_releasing, + Context *on_finish, bool shutting_down); + + ~PreReleaseRequest(); + void send(); + + private: + /** + * @verbatim + * + * + * | + * v + * PREPARE_LOCK + * | + * v + * CANCEL_OP_REQUESTS + * | + * v + * BLOCK_WRITES + * | + * v ++ * INVALIDATE_CACHE ++ * | ++ * v + * FLUSH_NOTIFIES . . . . . . . . . . . . . . + * | . + * v . + * CLOSE_JOURNAL . + * | (journal disabled, . + * v object map enabled) . + * CLOSE_OBJECT_MAP < . . . . . . . . . . . . + * | . + * v (object map disabled) . + * < . . . . . . . . . . . . . . . . . 
+ * + * @endverbatim + */ + + PreReleaseRequest(ImageCtxT &image_ctx, Context *on_releasing, + Context *on_finish, bool shutting_down); + + ImageCtxT &m_image_ctx; + Context *m_on_releasing; + Context *m_on_finish; + bool m_shutting_down; + + int m_error_result; + + decltype(m_image_ctx.object_map) m_object_map; + decltype(m_image_ctx.journal) m_journal; + + void send_prepare_lock(); + void handle_prepare_lock(int r); + + void send_cancel_op_requests(); + void handle_cancel_op_requests(int r); + + void send_block_writes(); + void handle_block_writes(int r); + - void send_image_flush_notifies(); - void handle_image_flush_notifies(int r); ++ void send_invalidate_cache(bool purge_on_error); ++ void handle_invalidate_cache(int r); ++ ++ void send_flush_notifies(); ++ void handle_flush_notifies(int r); + + void send_close_journal(); + void handle_close_journal(int r); + + void send_close_object_map(); + void handle_close_object_map(int r); + + void send_unlock(); + + void finish(); + + void save_result(int result) { + if (m_error_result == 0 && result < 0) { + m_error_result = result; + } + } + + }; + + } // namespace exclusive_lock + } // namespace librbd + + #endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H diff --cc src/librbd/internal.cc index 49e4e4b9f191,bfe226bdb326..d4392b30f1ce --- a/src/librbd/internal.cc +++ b/src/librbd/internal.cc @@@ -33,6 -33,6 +33,9 @@@ #include "librbd/internal.h" #include "librbd/Journal.h" #include "librbd/journal/Types.h" ++#include "librbd/managed_lock/BreakRequest.h" ++#include "librbd/managed_lock/GetLockerRequest.h" ++#include "librbd/managed_lock/Types.h" #include "librbd/mirror/DisableRequest.h" #include "librbd/mirror/EnableRequest.h" #include "librbd/MirroringWatcher.h" @@@ -1532,79 -1484,6 +1532,79 @@@ void filter_out_mirror_watchers(ImageCt return 0; } + int lock_get_owners(ImageCtx *ictx, rbd_lock_mode_t *lock_mode, + std::list *lock_owners) + { + CephContext *cct = ictx->cct; + ldout(cct, 20) << __func__ << ": ictx=" << ictx << dendl; + - exclusive_lock::Locker locker; ++ managed_lock::Locker locker; + C_SaferCond get_owner_ctx; - auto get_owner_req = exclusive_lock::GetLockerRequest<>::create( ++ auto get_owner_req = managed_lock::GetLockerRequest<>::create( + *ictx, &locker, &get_owner_ctx); + get_owner_req->send(); + + int r = get_owner_ctx.wait(); + if (r == -ENOENT) { + return r; + } else if (r < 0) { + lderr(cct) << "failed to determine current lock owner: " + << cpp_strerror(r) << dendl; + return r; + } + + *lock_mode = RBD_LOCK_MODE_EXCLUSIVE; + lock_owners->clear(); + lock_owners->emplace_back(locker.address); + return 0; + } + + int lock_break(ImageCtx *ictx, rbd_lock_mode_t lock_mode, + const std::string &lock_owner) + { + CephContext *cct = ictx->cct; + ldout(cct, 20) << __func__ << ": ictx=" << ictx << ", " + << "lock_mode=" << lock_mode << ", " + << "lock_owner=" << lock_owner << dendl; + + if (lock_mode != RBD_LOCK_MODE_EXCLUSIVE) { + return -EOPNOTSUPP; + } + - exclusive_lock::Locker locker; ++ managed_lock::Locker locker; + C_SaferCond get_owner_ctx; - auto get_owner_req = exclusive_lock::GetLockerRequest<>::create( ++ auto get_owner_req = managed_lock::GetLockerRequest<>::create( + *ictx, &locker, &get_owner_ctx); + get_owner_req->send(); + + int r = get_owner_ctx.wait(); + if (r == -ENOENT) { + return r; + } else if (r < 0) { + lderr(cct) << "failed to determine current lock owner: " + << cpp_strerror(r) << dendl; + return r; + } + + if (locker.address != lock_owner) { + return -EBUSY; + } + + C_SaferCond break_ctx; - 
auto break_req = exclusive_lock::BreakRequest<>::create( ++ auto break_req = managed_lock::BreakRequest<>::create( + *ictx, locker, ictx->blacklist_on_break_lock, true, &break_ctx); + break_req->send(); + + r = break_ctx.wait(); + if (r == -ENOENT) { + return r; + } else if (r < 0) { + lderr(cct) << "failed to break lock: " << cpp_strerror(r) << dendl; + return r; + } + return 0; + } + int remove(IoCtx& io_ctx, const std::string &image_name, const std::string &image_id, ProgressContext& prog_ctx, bool force) diff --cc src/librbd/managed_lock/BreakRequest.cc index 000000000000,000000000000..579bf793979a new file mode 100644 --- /dev/null +++ b/src/librbd/managed_lock/BreakRequest.cc @@@ -1,0 -1,0 +1,183 @@@ ++// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- ++// vim: ts=8 sw=2 smarttab ++ ++#include "librbd/managed_lock/BreakRequest.h" ++#include "common/dout.h" ++#include "common/errno.h" ++#include "common/WorkQueue.h" ++#include "include/stringify.h" ++#include "cls/lock/cls_lock_client.h" ++#include "cls/lock/cls_lock_types.h" ++#include "librbd/Utils.h" ++#include "librbd/managed_lock/Types.h" ++ ++#define dout_subsys ceph_subsys_rbd ++#undef dout_prefix ++#define dout_prefix *_dout << "librbd::managed_lock::BreakRequest: " << this \ ++ << " " << __func__ << ": " ++ ++namespace librbd { ++namespace managed_lock { ++ ++using util::create_context_callback; ++using util::create_rados_ack_callback; ++using util::create_rados_safe_callback; ++ ++namespace { ++ ++template ++struct C_BlacklistClient : public Context { ++ I &image_ctx; ++ std::string locker_address; ++ Context *on_finish; ++ ++ C_BlacklistClient(I &image_ctx, const std::string &locker_address, ++ Context *on_finish) ++ : image_ctx(image_ctx), locker_address(locker_address), ++ on_finish(on_finish) { ++ } ++ ++ virtual void finish(int r) override { ++ librados::Rados rados(image_ctx.md_ctx); ++ r = rados.blacklist_add(locker_address, ++ image_ctx.blacklist_expire_seconds); ++ on_finish->complete(r); ++ } ++}; ++ ++} // anonymous namespace ++ ++template ++void BreakRequest::send() { ++ send_get_watchers(); ++} ++ ++template ++void BreakRequest::send_get_watchers() { ++ CephContext *cct = m_image_ctx.cct; ++ ldout(cct, 10) << dendl; ++ ++ librados::ObjectReadOperation op; ++ op.list_watchers(&m_watchers, &m_watchers_ret_val); ++ ++ using klass = BreakRequest; ++ librados::AioCompletion *rados_completion = ++ create_rados_ack_callback(this); ++ m_out_bl.clear(); ++ int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, ++ rados_completion, &op, &m_out_bl); ++ assert(r == 0); ++ rados_completion->release(); ++} ++ ++template ++void BreakRequest::handle_get_watchers(int r) { ++ CephContext *cct = m_image_ctx.cct; ++ ldout(cct, 10) << "r=" << r << dendl; ++ ++ if (r == 0) { ++ r = m_watchers_ret_val; ++ } ++ if (r < 0) { ++ lderr(cct) << "failed to retrieve watchers: " << cpp_strerror(r) ++ << dendl; ++ finish(r); ++ return; ++ } ++ ++ for (auto &watcher : m_watchers) { ++ if ((strncmp(m_locker.address.c_str(), ++ watcher.addr, sizeof(watcher.addr)) == 0) && ++ (m_locker.handle == watcher.cookie)) { ++ ldout(cct, 10) << "lock owner is still alive" << dendl; ++ ++ if (m_force_break_lock) { ++ break; ++ } else { ++ finish(-EAGAIN); ++ return; ++ } ++ } ++ } ++ ++ send_blacklist(); ++} ++ ++template ++void BreakRequest::send_blacklist() { ++ if (!m_blacklist_locker) { ++ send_break_lock(); ++ return; ++ } ++ ++ CephContext *cct = m_image_ctx.cct; ++ ldout(cct, 10) << dendl; ++ ++ // TODO: need async version 
of RadosClient::blacklist_add ++ using klass = BreakRequest; ++ Context *ctx = create_context_callback( ++ this); ++ m_image_ctx.op_work_queue->queue(new C_BlacklistClient(m_image_ctx, ++ m_locker.address, ++ ctx), 0); ++} ++ ++template ++void BreakRequest::handle_blacklist(int r) { ++ CephContext *cct = m_image_ctx.cct; ++ ldout(cct, 10) << "r=" << r << dendl; ++ ++ if (r < 0) { ++ lderr(cct) << "failed to blacklist lock owner: " << cpp_strerror(r) ++ << dendl; ++ finish(r); ++ return; ++ } ++ send_break_lock(); ++} ++ ++template ++void BreakRequest::send_break_lock() { ++ CephContext *cct = m_image_ctx.cct; ++ ldout(cct, 10) << dendl; ++ ++ librados::ObjectWriteOperation op; ++ rados::cls::lock::break_lock(&op, RBD_LOCK_NAME, m_locker.cookie, ++ m_locker.entity); ++ ++ using klass = BreakRequest; ++ librados::AioCompletion *rados_completion = ++ create_rados_safe_callback(this); ++ int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, ++ rados_completion, &op); ++ assert(r == 0); ++ rados_completion->release(); ++} ++ ++template ++void BreakRequest::handle_break_lock(int r) { ++ CephContext *cct = m_image_ctx.cct; ++ ldout(cct, 10) << "r=" << r << dendl; ++ ++ if (r < 0 && r != -ENOENT) { ++ lderr(cct) << "failed to break lock: " << cpp_strerror(r) << dendl; ++ finish(r); ++ return; ++ } ++ ++ finish(0); ++} ++ ++template ++void BreakRequest::finish(int r) { ++ CephContext *cct = m_image_ctx.cct; ++ ldout(cct, 10) << "r=" << r << dendl; ++ ++ m_on_finish->complete(r); ++ delete this; ++} ++ ++} // namespace managed_lock ++} // namespace librbd ++ ++template class librbd::managed_lock::BreakRequest; diff --cc src/librbd/managed_lock/BreakRequest.h index 000000000000,000000000000..5e40b2b5c667 new file mode 100644 --- /dev/null +++ b/src/librbd/managed_lock/BreakRequest.h @@@ -1,0 -1,0 +1,95 @@@ ++// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- ++// vim: ts=8 sw=2 smarttab ++ ++#ifndef CEPH_LIBRBD_MANAGED_LOCK_BREAK_REQUEST_H ++#define CEPH_LIBRBD_MANAGED_LOCK_BREAK_REQUEST_H ++ ++#include "include/int_types.h" ++#include "include/buffer.h" ++#include "msg/msg_types.h" ++#include "librbd/ImageCtx.h" ++#include ++#include ++#include ++ ++class Context; ++ ++namespace librbd { ++ ++template class Journal; ++ ++namespace managed_lock { ++ ++struct Locker; ++ ++template ++class BreakRequest { ++public: ++ static BreakRequest* create(ImageCtxT &image_ctx, const Locker &locker, ++ bool blacklist_locker, bool force_break_lock, ++ Context *on_finish) { ++ return new BreakRequest(image_ctx, locker, blacklist_locker, ++ force_break_lock, on_finish); ++ } ++ ++ void send(); ++ ++private: ++ /** ++ * @verbatim ++ * ++ * ++ * | ++ * v ++ * GET_WATCHERS ++ * | ++ * v ++ * BLACKLIST (skip if disabled) ++ * | ++ * v ++ * BREAK_LOCK ++ * | ++ * v ++ * ++ * ++ * @endvertbatim ++ */ ++ ++ ImageCtxT &m_image_ctx; ++ const Locker &m_locker; ++ bool m_blacklist_locker; ++ bool m_force_break_lock; ++ Context *m_on_finish; ++ ++ bufferlist m_out_bl; ++ ++ std::list m_watchers; ++ int m_watchers_ret_val; ++ ++ BreakRequest(ImageCtxT &image_ctx, const Locker &locker, ++ bool blacklist_locker, bool force_break_lock, ++ Context *on_finish) ++ : m_image_ctx(image_ctx), m_locker(locker), ++ m_blacklist_locker(blacklist_locker), ++ m_force_break_lock(force_break_lock), m_on_finish(on_finish) { ++ } ++ ++ void send_get_watchers(); ++ void handle_get_watchers(int r); ++ ++ void send_blacklist(); ++ void handle_blacklist(int r); ++ ++ void send_break_lock(); ++ void handle_break_lock(int r); 
++ ++ void finish(int r); ++ ++}; ++ ++} // namespace managed_lock ++} // namespace librbd ++ ++extern template class librbd::managed_lock::BreakRequest; ++ ++#endif // CEPH_LIBRBD_MANAGED_LOCK_BREAK_REQUEST_H diff --cc src/librbd/managed_lock/GetLockerRequest.cc index 000000000000,000000000000..e6da32766fe2 new file mode 100644 --- /dev/null +++ b/src/librbd/managed_lock/GetLockerRequest.cc @@@ -1,0 -1,0 +1,124 @@@ ++// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- ++// vim: ts=8 sw=2 smarttab ++ ++#include "librbd/managed_lock/GetLockerRequest.h" ++#include "cls/lock/cls_lock_client.h" ++#include "cls/lock/cls_lock_types.h" ++#include "common/dout.h" ++#include "common/errno.h" ++#include "include/stringify.h" ++#include "librbd/ImageCtx.h" ++#include "librbd/ManagedLock.h" ++#include "librbd/Utils.h" ++#include "librbd/managed_lock/Types.h" ++ ++#define dout_subsys ceph_subsys_rbd ++#undef dout_prefix ++#define dout_prefix *_dout << "librbd::managed_lock::GetLockerRequest: " \ ++ << this << " " << __func__ << ": " ++ ++namespace librbd { ++namespace managed_lock { ++ ++using util::create_rados_ack_callback; ++ ++template ++void GetLockerRequest::send() { ++ send_get_lockers(); ++} ++ ++template ++void GetLockerRequest::send_get_lockers() { ++ CephContext *cct = m_image_ctx.cct; ++ ldout(cct, 10) << dendl; ++ ++ librados::ObjectReadOperation op; ++ rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME); ++ ++ using klass = GetLockerRequest; ++ librados::AioCompletion *rados_completion = ++ create_rados_ack_callback(this); ++ m_out_bl.clear(); ++ int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, ++ rados_completion, &op, &m_out_bl); ++ assert(r == 0); ++ rados_completion->release(); ++} ++ ++template ++void GetLockerRequest::handle_get_lockers(int r) { ++ CephContext *cct = m_image_ctx.cct; ++ ldout(cct, 10) << "r=" << r << dendl; ++ ++ std::map lockers; ++ ClsLockType lock_type = LOCK_NONE; ++ std::string lock_tag; ++ if (r == 0) { ++ bufferlist::iterator it = m_out_bl.begin(); ++ r = rados::cls::lock::get_lock_info_finish(&it, &lockers, ++ &lock_type, &lock_tag); ++ } ++ ++ if (r < 0) { ++ lderr(cct) << "failed to retrieve lockers: " << cpp_strerror(r) << dendl; ++ finish(r); ++ return; ++ } ++ ++ if (lockers.empty()) { ++ ldout(cct, 20) << "no lockers detected" << dendl; ++ finish(-ENOENT); ++ return; ++ } ++ ++ if (lock_tag != ManagedLock<>::WATCHER_LOCK_TAG) { ++ ldout(cct, 5) <<"locked by external mechanism: tag=" << lock_tag << dendl; ++ finish(-EBUSY); ++ return; ++ } ++ ++ if (lock_type == LOCK_SHARED) { ++ ldout(cct, 5) << "shared lock type detected" << dendl; ++ finish(-EBUSY); ++ return; ++ } ++ ++ std::map::iterator iter = lockers.begin(); ++ if (!ManagedLock<>::decode_lock_cookie(iter->first.cookie, ++ &m_locker->handle)) { ++ ldout(cct, 5) << "locked by external mechanism: " ++ << "cookie=" << iter->first.cookie << dendl; ++ finish(-EBUSY); ++ return; ++ } ++ ++ m_locker->entity = iter->first.locker; ++ m_locker->cookie = iter->first.cookie; ++ m_locker->address = stringify(iter->second.addr); ++ if (m_locker->cookie.empty() || m_locker->address.empty()) { ++ ldout(cct, 20) << "no valid lockers detected" << dendl; ++ finish(-ENOENT); ++ return; ++ } ++ ++ ldout(cct, 10) << "retrieved exclusive locker: " ++ << m_locker->entity << "@" << m_locker->address << dendl; ++ finish(0); ++} ++ ++template ++void GetLockerRequest::finish(int r) { ++ CephContext *cct = m_image_ctx.cct; ++ ldout(cct, 10) << "r=" << r << dendl; ++ ++ 
m_on_finish->complete(r); ++ delete this; ++} ++ ++} // namespace managed_lock ++} // namespace librbd ++ ++template class librbd::managed_lock::GetLockerRequest; ++ diff --cc src/librbd/managed_lock/GetLockerRequest.h index 000000000000,000000000000..b0a5788465a8 new file mode 100644 --- /dev/null +++ b/src/librbd/managed_lock/GetLockerRequest.h @@@ -1,0 -1,0 +1,53 @@@ ++// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- ++// vim: ts=8 sw=2 smarttab ++ ++#ifndef CEPH_LIBRBD_MANAGED_LOCK_GET_LOCKER_REQUEST_H ++#define CEPH_LIBRBD_MANAGED_LOCK_GET_LOCKER_REQUEST_H ++ ++#include "include/int_types.h" ++#include "include/buffer.h" ++ ++class Context; ++ ++namespace librbd { ++ ++struct ImageCtx; ++ ++namespace managed_lock { ++ ++struct Locker; ++ ++template ++class GetLockerRequest { ++public: ++ static GetLockerRequest* create(ImageCtxT &image_ctx, Locker *locker, ++ Context *on_finish) { ++ return new GetLockerRequest(image_ctx, locker, on_finish); ++ } ++ ++ void send(); ++ ++private: ++ ImageCtxT &m_image_ctx; ++ Locker *m_locker; ++ Context *m_on_finish; ++ ++ bufferlist m_out_bl; ++ ++ GetLockerRequest(ImageCtxT &image_ctx, Locker *locker, Context *on_finish) ++ : m_image_ctx(image_ctx), m_locker(locker), m_on_finish(on_finish) { ++ } ++ ++ void send_get_lockers(); ++ void handle_get_lockers(int r); ++ ++ void finish(int r); ++ ++}; ++ ++} // namespace managed_lock ++} // namespace librbd ++ ++extern template class librbd::managed_lock::GetLockerRequest; ++ ++#endif // CEPH_LIBRBD_MANAGED_LOCK_GET_LOCKER_REQUEST_H diff --cc src/librbd/managed_lock/Types.h index 000000000000,000000000000..beb764ea5b52 new file mode 100644 --- /dev/null +++ b/src/librbd/managed_lock/Types.h @@@ -1,0 -1,0 +1,23 @@@ ++// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- ++// vim: ts=8 sw=2 smarttab ++ ++#ifndef CEPH_LIBRBD_MANAGED_LOCK_TYPES_H ++#define CEPH_LIBRBD_MANAGED_LOCK_TYPES_H ++ ++#include "msg/msg_types.h" ++#include ++ ++namespace librbd { ++namespace managed_lock { ++ ++struct Locker { ++ entity_name_t entity; ++ std::string cookie; ++ std::string address; ++ uint64_t handle; ++}; ++ ++} // namespace managed_lock ++} // namespace librbd ++ ++#endif // CEPH_LIBRBD_MANAGED_LOCK_TYPES_H diff --cc src/test/librbd/CMakeLists.txt index f58e2950e540,0e675a462432..82c08bd01e7f --- a/src/test/librbd/CMakeLists.txt +++ b/src/test/librbd/CMakeLists.txt @@@ -30,18 -30,14 +30,20 @@@ set(unittest_librbd_src test_mock_AioImageRequest.cc test_mock_ExclusiveLock.cc test_mock_Journal.cc ++ test_mock_ManagedLock.cc test_mock_ObjectMap.cc - test_mock_ObjectWatcher.cc - exclusive_lock/test_mock_AcquireRequest.cc - exclusive_lock/test_mock_BreakRequest.cc - exclusive_lock/test_mock_GetLockerRequest.cc - exclusive_lock/test_mock_ReacquireRequest.cc - exclusive_lock/test_mock_ReleaseRequest.cc + exclusive_lock/test_mock_PreAcquireRequest.cc + exclusive_lock/test_mock_PostAcquireRequest.cc + exclusive_lock/test_mock_PreReleaseRequest.cc image/test_mock_RefreshRequest.cc - image_watcher/test_mock_RewatchRequest.cc journal/test_mock_OpenRequest.cc journal/test_mock_PromoteRequest.cc journal/test_mock_Replay.cc ++ managed_lock/test_mock_AcquireRequest.cc ++ managed_lock/test_mock_BreakRequest.cc ++ managed_lock/test_mock_GetLockerRequest.cc ++ managed_lock/test_mock_ReacquireRequest.cc ++ managed_lock/test_mock_ReleaseRequest.cc mirror/test_mock_DisableRequest.cc object_map/test_mock_InvalidateRequest.cc object_map/test_mock_LockRequest.cc @@@ -61,6 -57,11 +63,7 @@@ 
operation/test_mock_SnapshotRemoveRequest.cc operation/test_mock_SnapshotRollbackRequest.cc operation/test_mock_SnapshotUnprotectRequest.cc + watcher/test_mock_RewatchRequest.cc - managed_lock/test_mock_ReleaseRequest.cc - managed_lock/test_mock_ReacquireRequest.cc - managed_lock/test_mock_AcquireRequest.cc - test_mock_ManagedLock.cc ) add_executable(unittest_librbd ${unittest_librbd_srcs} diff --cc src/test/librbd/exclusive_lock/test_mock_PreReleaseRequest.cc index 000000000000,3c5e8c8e72b5..4cf9c2254aa0 mode 000000,100644..100644 --- a/src/test/librbd/exclusive_lock/test_mock_PreReleaseRequest.cc +++ b/src/test/librbd/exclusive_lock/test_mock_PreReleaseRequest.cc @@@ -1,0 -1,249 +1,302 @@@ + // -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- + // vim: ts=8 sw=2 smarttab + + #include "test/librbd/test_mock_fixture.h" + #include "test/librbd/test_support.h" + #include "test/librbd/mock/MockImageCtx.h" + #include "test/librbd/mock/MockJournal.h" + #include "test/librbd/mock/MockObjectMap.h" + #include "test/librados_test_stub/MockTestMemIoCtxImpl.h" + #include "librbd/exclusive_lock/PreReleaseRequest.h" + #include "gmock/gmock.h" + #include "gtest/gtest.h" + #include + + // template definitions + #include "librbd/exclusive_lock/PreReleaseRequest.cc" + template class librbd::exclusive_lock::PreReleaseRequest; + + namespace librbd { + + using librbd::ManagedLock; + + namespace exclusive_lock { + + namespace { + + struct MockContext : public Context { + MOCK_METHOD1(complete, void(int)); + MOCK_METHOD1(finish, void(int)); + }; + + } // anonymous namespace + + using ::testing::_; + using ::testing::InSequence; + using ::testing::Invoke; + using ::testing::Return; + using ::testing::StrEq; ++using ::testing::WithArg; + + static const std::string TEST_COOKIE("auto 123"); + + class TestMockExclusiveLockPreReleaseRequest : public TestMockFixture { + public: + typedef PreReleaseRequest MockPreReleaseRequest; + + void expect_complete_context(MockContext &mock_context, int r) { + EXPECT_CALL(mock_context, complete(r)); + } + + void expect_test_features(MockImageCtx &mock_image_ctx, uint64_t features, + bool enabled) { + EXPECT_CALL(mock_image_ctx, test_features(features)) + .WillOnce(Return(enabled)); + } + + void expect_set_require_lock_on_read(MockImageCtx &mock_image_ctx) { + EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock_on_read()); + } + + void expect_block_writes(MockImageCtx &mock_image_ctx, int r) { + expect_test_features(mock_image_ctx, RBD_FEATURE_JOURNALING, + ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0)); + if ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) { + expect_set_require_lock_on_read(mock_image_ctx); + } + EXPECT_CALL(*mock_image_ctx.aio_work_queue, block_writes(_)) + .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue)); + } + + void expect_unblock_writes(MockImageCtx &mock_image_ctx) { + EXPECT_CALL(*mock_image_ctx.aio_work_queue, unblock_writes()); + } + + void expect_cancel_op_requests(MockImageCtx &mock_image_ctx, int r) { + EXPECT_CALL(mock_image_ctx, cancel_async_requests(_)) + .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue)); + } + + void expect_close_journal(MockImageCtx &mock_image_ctx, + MockJournal &mock_journal, int r) { + EXPECT_CALL(mock_journal, close(_)) + .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue)); + } + + void expect_close_object_map(MockImageCtx &mock_image_ctx, + MockObjectMap &mock_object_map) { + EXPECT_CALL(mock_object_map, close(_)) + 
.WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); + } + ++ void expect_invalidate_cache(MockImageCtx &mock_image_ctx, bool purge, ++ int r) { ++ if (mock_image_ctx.object_cacher != nullptr) { ++ EXPECT_CALL(mock_image_ctx, invalidate_cache(purge, _)) ++ .WillOnce(WithArg<1>(CompleteContext(r, NULL))); ++ } ++ } ++ ++ void expect_is_cache_empty(MockImageCtx &mock_image_ctx, bool empty) { ++ if (mock_image_ctx.object_cacher != nullptr) { ++ EXPECT_CALL(mock_image_ctx, is_cache_empty()) ++ .WillOnce(Return(empty)); ++ } ++ } ++ + void expect_flush_notifies(MockImageCtx &mock_image_ctx) { + EXPECT_CALL(*mock_image_ctx.image_watcher, flush(_)) + .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue)); + } + + void expect_prepare_lock(MockImageCtx &mock_image_ctx) { + EXPECT_CALL(*mock_image_ctx.state, prepare_lock(_)) + .WillOnce(Invoke([](Context *on_ready) { + on_ready->complete(0); + })); + } + + void expect_handle_prepare_lock_complete(MockImageCtx &mock_image_ctx) { + EXPECT_CALL(*mock_image_ctx.state, handle_prepare_lock_complete()); + } + + }; + + TEST_F(TestMockExclusiveLockPreReleaseRequest, Success) { + REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); + + librbd::ImageCtx *ictx; + ASSERT_EQ(0, open_image(m_image_name, &ictx)); + + MockImageCtx mock_image_ctx(*ictx); + expect_op_work_queue(mock_image_ctx); + + InSequence seq; + + expect_prepare_lock(mock_image_ctx); + expect_cancel_op_requests(mock_image_ctx, 0); + expect_block_writes(mock_image_ctx, 0); ++ expect_invalidate_cache(mock_image_ctx, false, 0); + expect_flush_notifies(mock_image_ctx); + + MockJournal *mock_journal = new MockJournal(); + mock_image_ctx.journal = mock_journal; + expect_close_journal(mock_image_ctx, *mock_journal, -EINVAL); + + MockObjectMap *mock_object_map = new MockObjectMap(); + mock_image_ctx.object_map = mock_object_map; + expect_close_object_map(mock_image_ctx, *mock_object_map); + + MockContext mock_releasing_ctx; + expect_complete_context(mock_releasing_ctx, 0); + expect_handle_prepare_lock_complete(mock_image_ctx); + + C_SaferCond ctx; - MockPreReleaseRequest *req = MockPreReleaseRequest::create(mock_image_ctx, - &mock_releasing_ctx, - &ctx, false); ++ MockPreReleaseRequest *req = MockPreReleaseRequest::create( ++ mock_image_ctx, &mock_releasing_ctx, &ctx, false); + req->send(); + ASSERT_EQ(0, ctx.wait()); + } + + TEST_F(TestMockExclusiveLockPreReleaseRequest, SuccessJournalDisabled) { + REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); + + librbd::ImageCtx *ictx; + ASSERT_EQ(0, open_image(m_image_name, &ictx)); + + MockImageCtx mock_image_ctx(*ictx); + + expect_block_writes(mock_image_ctx, 0); + expect_op_work_queue(mock_image_ctx); + + InSequence seq; + expect_prepare_lock(mock_image_ctx); + expect_cancel_op_requests(mock_image_ctx, 0); ++ expect_invalidate_cache(mock_image_ctx, false, 0); + expect_flush_notifies(mock_image_ctx); + + MockObjectMap *mock_object_map = new MockObjectMap(); + mock_image_ctx.object_map = mock_object_map; + expect_close_object_map(mock_image_ctx, *mock_object_map); + + expect_handle_prepare_lock_complete(mock_image_ctx); + + C_SaferCond release_ctx; + C_SaferCond ctx; - MockPreReleaseRequest *req = MockPreReleaseRequest::create(mock_image_ctx, - &release_ctx, &ctx, - false); ++ MockPreReleaseRequest *req = MockPreReleaseRequest::create( ++ mock_image_ctx, &release_ctx, &ctx, false); + req->send(); + ASSERT_EQ(0, release_ctx.wait()); + ASSERT_EQ(0, ctx.wait()); + } + + TEST_F(TestMockExclusiveLockPreReleaseRequest, SuccessObjectMapDisabled) { 
+ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); + + librbd::ImageCtx *ictx; + ASSERT_EQ(0, open_image(m_image_name, &ictx)); + + MockImageCtx mock_image_ctx(*ictx); + + expect_block_writes(mock_image_ctx, 0); + expect_op_work_queue(mock_image_ctx); + + InSequence seq; + expect_cancel_op_requests(mock_image_ctx, 0); ++ expect_invalidate_cache(mock_image_ctx, false, 0); + expect_flush_notifies(mock_image_ctx); + + C_SaferCond release_ctx; + C_SaferCond ctx; - MockPreReleaseRequest *req = MockPreReleaseRequest::create(mock_image_ctx, - &release_ctx, &ctx, - true); ++ MockPreReleaseRequest *req = MockPreReleaseRequest::create( ++ mock_image_ctx, &release_ctx, &ctx, true); + req->send(); + ASSERT_EQ(0, release_ctx.wait()); + ASSERT_EQ(0, ctx.wait()); + } + ++TEST_F(TestMockExclusiveLockPreReleaseRequest, Blacklisted) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_prepare_lock(mock_image_ctx); ++ expect_cancel_op_requests(mock_image_ctx, 0); ++ expect_block_writes(mock_image_ctx, -EBLACKLISTED); ++ expect_invalidate_cache(mock_image_ctx, false, -EBLACKLISTED); ++ expect_is_cache_empty(mock_image_ctx, false); ++ expect_invalidate_cache(mock_image_ctx, true, -EBLACKLISTED); ++ expect_is_cache_empty(mock_image_ctx, true); ++ expect_flush_notifies(mock_image_ctx); ++ ++ MockJournal *mock_journal = new MockJournal(); ++ mock_image_ctx.journal = mock_journal; ++ expect_close_journal(mock_image_ctx, *mock_journal, -EBLACKLISTED); ++ ++ MockObjectMap *mock_object_map = new MockObjectMap(); ++ mock_image_ctx.object_map = mock_object_map; ++ expect_close_object_map(mock_image_ctx, *mock_object_map); ++ ++ MockContext mock_releasing_ctx; ++ expect_complete_context(mock_releasing_ctx, 0); ++ expect_handle_prepare_lock_complete(mock_image_ctx); ++ ++ C_SaferCond ctx; ++ MockPreReleaseRequest *req = MockPreReleaseRequest::create( ++ mock_image_ctx, &mock_releasing_ctx, &ctx, false); ++ req->send(); ++ ASSERT_EQ(0, ctx.wait()); ++} ++ + TEST_F(TestMockExclusiveLockPreReleaseRequest, BlockWritesError) { + REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); + + librbd::ImageCtx *ictx; + ASSERT_EQ(0, open_image(m_image_name, &ictx)); + + MockImageCtx mock_image_ctx(*ictx); + + expect_op_work_queue(mock_image_ctx); + + InSequence seq; + expect_cancel_op_requests(mock_image_ctx, 0); + expect_block_writes(mock_image_ctx, -EINVAL); + expect_unblock_writes(mock_image_ctx); + + C_SaferCond ctx; - MockPreReleaseRequest *req = MockPreReleaseRequest::create(mock_image_ctx, - nullptr, &ctx, - true); ++ MockPreReleaseRequest *req = MockPreReleaseRequest::create( ++ mock_image_ctx, nullptr, &ctx, true); + req->send(); + ASSERT_EQ(-EINVAL, ctx.wait()); + } + + TEST_F(TestMockExclusiveLockPreReleaseRequest, UnlockError) { + REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); + + librbd::ImageCtx *ictx; + ASSERT_EQ(0, open_image(m_image_name, &ictx)); + + MockImageCtx mock_image_ctx(*ictx); + + expect_op_work_queue(mock_image_ctx); + + InSequence seq; + expect_cancel_op_requests(mock_image_ctx, 0); + expect_block_writes(mock_image_ctx, 0); ++ expect_invalidate_cache(mock_image_ctx, false, 0); + expect_flush_notifies(mock_image_ctx); + + C_SaferCond ctx; - MockPreReleaseRequest *req = MockPreReleaseRequest::create(mock_image_ctx, - nullptr, &ctx, - true); ++ MockPreReleaseRequest *req = MockPreReleaseRequest::create( ++ mock_image_ctx, nullptr, 
&ctx, true); + req->send(); + ASSERT_EQ(0, ctx.wait()); + } + + } // namespace exclusive_lock + } // namespace librbd diff --cc src/test/librbd/managed_lock/test_mock_BreakRequest.cc index 000000000000,000000000000..dcc25091ea6b new file mode 100644 --- /dev/null +++ b/src/test/librbd/managed_lock/test_mock_BreakRequest.cc @@@ -1,0 -1,0 +1,249 @@@ ++// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- ++// vim: ts=8 sw=2 smarttab ++ ++#include "test/librbd/test_mock_fixture.h" ++#include "test/librbd/test_support.h" ++#include "test/librbd/mock/MockImageCtx.h" ++#include "test/librados_test_stub/MockTestMemIoCtxImpl.h" ++#include "test/librados_test_stub/MockTestMemRadosClient.h" ++#include "cls/lock/cls_lock_ops.h" ++#include "librbd/ManagedLock.h" ++#include "librbd/managed_lock/BreakRequest.h" ++#include "gmock/gmock.h" ++#include "gtest/gtest.h" ++#include ++#include ++ ++namespace librbd { ++namespace { ++ ++struct MockTestImageCtx : public librbd::MockImageCtx { ++ MockTestImageCtx(librbd::ImageCtx &image_ctx) ++ : librbd::MockImageCtx(image_ctx) { ++ } ++}; ++ ++} // anonymous namespace ++} // namespace librbd ++ ++// template definitions ++#include "librbd/managed_lock/BreakRequest.cc" ++ ++namespace librbd { ++namespace managed_lock { ++ ++using ::testing::_; ++using ::testing::DoAll; ++using ::testing::InSequence; ++using ::testing::Return; ++using ::testing::SetArgPointee; ++using ::testing::StrEq; ++using ::testing::WithArg; ++ ++class TestMockManagedLockBreakRequest : public TestMockFixture { ++public: ++ typedef BreakRequest MockBreakRequest; ++ ++ void expect_list_watchers(MockTestImageCtx &mock_image_ctx, int r, ++ const std::string &address, uint64_t watch_handle) { ++ auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), ++ list_watchers(mock_image_ctx.header_oid, _)); ++ if (r < 0) { ++ expect.WillOnce(Return(r)); ++ } else { ++ obj_watch_t watcher; ++ strcpy(watcher.addr, (address + ":0/0").c_str()); ++ watcher.cookie = watch_handle; ++ ++ std::list watchers; ++ watchers.push_back(watcher); ++ ++ expect.WillOnce(DoAll(SetArgPointee<1>(watchers), Return(0))); ++ } ++ } ++ ++ void expect_blacklist_add(MockTestImageCtx &mock_image_ctx, int r) { ++ EXPECT_CALL(get_mock_rados_client(), blacklist_add(_, _)) ++ .WillOnce(Return(r)); ++ } ++ ++ void expect_break_lock(MockTestImageCtx &mock_image_ctx, int r) { ++ EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), ++ exec(mock_image_ctx.header_oid, _, StrEq("lock"), StrEq("break_lock"), _, _, _)) ++ .WillOnce(Return(r)); ++ } ++}; ++ ++TEST_F(TestMockManagedLockBreakRequest, DeadLockOwner) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_list_watchers(mock_image_ctx, 0, "dead client", 123); ++ expect_blacklist_add(mock_image_ctx, 0); ++ expect_break_lock(mock_image_ctx, 0); ++ ++ C_SaferCond ctx; ++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123}; ++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker, ++ true, false, &ctx); ++ req->send(); ++ ASSERT_EQ(0, ctx.wait()); ++} ++ ++TEST_F(TestMockManagedLockBreakRequest, ForceBreak) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence 
seq; ++ expect_list_watchers(mock_image_ctx, 0, "1.2.3.4", 123); ++ expect_blacklist_add(mock_image_ctx, 0); ++ expect_break_lock(mock_image_ctx, 0); ++ ++ C_SaferCond ctx; ++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123}; ++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker, ++ true, true, &ctx); ++ req->send(); ++ ASSERT_EQ(0, ctx.wait()); ++} ++ ++TEST_F(TestMockManagedLockBreakRequest, GetWatchersError) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_list_watchers(mock_image_ctx, -EINVAL, "dead client", 123); ++ ++ C_SaferCond ctx; ++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123}; ++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker, ++ true, false, &ctx); ++ req->send(); ++ ASSERT_EQ(-EINVAL, ctx.wait()); ++} ++ ++TEST_F(TestMockManagedLockBreakRequest, GetWatchersAlive) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_list_watchers(mock_image_ctx, 0, "1.2.3.4", 123); ++ ++ C_SaferCond ctx; ++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123}; ++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker, ++ true, false, &ctx); ++ req->send(); ++ ASSERT_EQ(-EAGAIN, ctx.wait()); ++} ++ ++TEST_F(TestMockManagedLockBreakRequest, BlacklistDisabled) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_list_watchers(mock_image_ctx, 0, "dead client", 123); ++ expect_break_lock(mock_image_ctx, 0); ++ ++ C_SaferCond ctx; ++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123}; ++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker, ++ false, false, &ctx); ++ req->send(); ++ ASSERT_EQ(0, ctx.wait()); ++} ++ ++TEST_F(TestMockManagedLockBreakRequest, BlacklistError) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_list_watchers(mock_image_ctx, 0, "dead client", 123); ++ expect_blacklist_add(mock_image_ctx, -EINVAL); ++ ++ C_SaferCond ctx; ++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123}; ++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker, ++ true, false, &ctx); ++ req->send(); ++ ASSERT_EQ(-EINVAL, ctx.wait()); ++} ++ ++TEST_F(TestMockManagedLockBreakRequest, BreakLockMissing) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_list_watchers(mock_image_ctx, 0, "dead client", 123); ++ expect_blacklist_add(mock_image_ctx, 0); ++ expect_break_lock(mock_image_ctx, -ENOENT); ++ ++ C_SaferCond ctx; ++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123}; ++ MockBreakRequest *req = 
MockBreakRequest::create(mock_image_ctx, locker, ++ true, false, &ctx); ++ req->send(); ++ ASSERT_EQ(0, ctx.wait()); ++} ++ ++TEST_F(TestMockManagedLockBreakRequest, BreakLockError) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_list_watchers(mock_image_ctx, 0, "dead client", 123); ++ expect_blacklist_add(mock_image_ctx, 0); ++ expect_break_lock(mock_image_ctx, -EINVAL); ++ ++ C_SaferCond ctx; ++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123}; ++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker, ++ true, false, &ctx); ++ req->send(); ++ ASSERT_EQ(-EINVAL, ctx.wait()); ++} ++ ++} // namespace managed_lock ++} // namespace librbd ++ diff --cc src/test/librbd/managed_lock/test_mock_GetLockerRequest.cc index 000000000000,000000000000..81c480bea756 new file mode 100644 --- /dev/null +++ b/src/test/librbd/managed_lock/test_mock_GetLockerRequest.cc @@@ -1,0 -1,0 +1,216 @@@ ++// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- ++// vim: ts=8 sw=2 smarttab ++ ++#include "test/librbd/test_mock_fixture.h" ++#include "test/librbd/test_support.h" ++#include "test/librbd/mock/MockImageCtx.h" ++#include "test/librados_test_stub/MockTestMemIoCtxImpl.h" ++#include "test/librados_test_stub/MockTestMemRadosClient.h" ++#include "cls/lock/cls_lock_ops.h" ++#include "librbd/ManagedLock.h" ++#include "librbd/managed_lock/GetLockerRequest.h" ++#include "librbd/managed_lock/Types.h" ++#include "gmock/gmock.h" ++#include "gtest/gtest.h" ++#include ++#include ++ ++namespace librbd { ++namespace { ++ ++struct MockTestImageCtx : public librbd::MockImageCtx { ++ MockTestImageCtx(librbd::ImageCtx &image_ctx) ++ : librbd::MockImageCtx(image_ctx) { ++ } ++}; ++ ++} // anonymous namespace ++} // namespace librbd ++ ++// template definitions ++#include "librbd/managed_lock/GetLockerRequest.cc" ++ ++namespace librbd { ++namespace managed_lock { ++ ++using ::testing::_; ++using ::testing::DoAll; ++using ::testing::InSequence; ++using ::testing::Return; ++using ::testing::StrEq; ++using ::testing::WithArg; ++ ++class TestMockManagedLockGetLockerRequest : public TestMockFixture { ++public: ++ typedef GetLockerRequest MockGetLockerRequest; ++ ++ void expect_get_lock_info(MockTestImageCtx &mock_image_ctx, int r, ++ const entity_name_t &locker_entity, ++ const std::string &locker_address, ++ const std::string &locker_cookie, ++ const std::string &lock_tag, ++ ClsLockType lock_type) { ++ auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx), ++ exec(mock_image_ctx.header_oid, _, StrEq("lock"), ++ StrEq("get_info"), _, _, _)); ++ if (r < 0 && r != -ENOENT) { ++ expect.WillOnce(Return(r)); ++ } else { ++ entity_name_t entity(locker_entity); ++ entity_addr_t entity_addr; ++ entity_addr.parse(locker_address.c_str(), NULL); ++ ++ cls_lock_get_info_reply reply; ++ if (r != -ENOENT) { ++ reply.lockers = decltype(reply.lockers){ ++ {rados::cls::lock::locker_id_t(entity, locker_cookie), ++ rados::cls::lock::locker_info_t(utime_t(), entity_addr, "")}}; ++ reply.tag = lock_tag; ++ reply.lock_type = lock_type; ++ } ++ ++ bufferlist bl; ++ ::encode(reply, bl, CEPH_FEATURES_SUPPORTED_DEFAULT); ++ ++ std::string str(bl.c_str(), bl.length()); ++ expect.WillOnce(DoAll(WithArg<5>(CopyInBufferlist(str)), Return(0))); ++ } ++ } ++}; ++ 
++TEST_F(TestMockManagedLockGetLockerRequest, Success) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4", ++ "auto 123", ManagedLock<>::WATCHER_LOCK_TAG, ++ LOCK_EXCLUSIVE); ++ ++ C_SaferCond ctx; ++ Locker locker; ++ MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx, ++ &locker, &ctx); ++ req->send(); ++ ASSERT_EQ(0, ctx.wait()); ++ ++ ASSERT_EQ(entity_name_t::CLIENT(1), locker.entity); ++ ASSERT_EQ("1.2.3.4:0/0", locker.address); ++ ASSERT_EQ("auto 123", locker.cookie); ++ ASSERT_EQ(123U, locker.handle); ++} ++ ++TEST_F(TestMockManagedLockGetLockerRequest, GetLockInfoError) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_get_lock_info(mock_image_ctx, -EINVAL, entity_name_t::CLIENT(1), "", ++ "", "", LOCK_EXCLUSIVE); ++ ++ C_SaferCond ctx; ++ Locker locker; ++ MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx, ++ &locker, &ctx); ++ req->send(); ++ ASSERT_EQ(-EINVAL, ctx.wait()); ++} ++ ++TEST_F(TestMockManagedLockGetLockerRequest, GetLockInfoEmpty) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_get_lock_info(mock_image_ctx, -ENOENT, entity_name_t::CLIENT(1), "", ++ "", "", LOCK_EXCLUSIVE); ++ ++ C_SaferCond ctx; ++ Locker locker; ++ MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx, ++ &locker, &ctx); ++ req->send(); ++ ASSERT_EQ(-ENOENT, ctx.wait()); ++} ++ ++TEST_F(TestMockManagedLockGetLockerRequest, GetLockInfoExternalTag) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4", ++ "auto 123", "external tag", LOCK_EXCLUSIVE); ++ ++ C_SaferCond ctx; ++ Locker locker; ++ MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx, ++ &locker, &ctx); ++ req->send(); ++ ASSERT_EQ(-EBUSY, ctx.wait()); ++} ++ ++TEST_F(TestMockManagedLockGetLockerRequest, GetLockInfoShared) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4", ++ "auto 123", ManagedLock<>::WATCHER_LOCK_TAG, ++ LOCK_SHARED); ++ ++ C_SaferCond ctx; ++ Locker locker; ++ MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx, ++ &locker, &ctx); ++ req->send(); ++ ASSERT_EQ(-EBUSY, ctx.wait()); ++} ++ ++TEST_F(TestMockManagedLockGetLockerRequest, GetLockInfoExternalCookie) { ++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK); ++ ++ librbd::ImageCtx *ictx; ++ ASSERT_EQ(0, open_image(m_image_name, &ictx)); ++ ++ MockTestImageCtx mock_image_ctx(*ictx); ++ 
expect_op_work_queue(mock_image_ctx); ++ ++ InSequence seq; ++ expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4", ++ "external cookie", ManagedLock<>::WATCHER_LOCK_TAG, ++ LOCK_EXCLUSIVE); ++ ++ C_SaferCond ctx; ++ Locker locker; ++ MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx, ++ &locker, &ctx); ++ req->send(); ++ ASSERT_EQ(-EBUSY, ctx.wait()); ++} ++ ++} // namespace managed_lock ++} // namespace librbd
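
Note (not part of the patch above): the new managed_lock request classes all follow the same create()/send()/Context-completion pattern that the unit tests drive with C_SaferCond. The sketch below is an illustrative, hypothetical caller only; the helper name example_break_stale_lock is invented, the empty <> template arguments assume the default ImageCtx parameter, and the real orchestration of these requests lives inside ManagedLock's acquire path rather than in ad-hoc code like this.

// Illustrative sketch only -- not part of this commit. It assumes an open
// librbd::ImageCtx named image_ctx and shows the generic request pattern
// (create() -> send() -> Context completion), waited on synchronously the
// same way the unit tests do. Both requests delete themselves on finish().
#include <cerrno>
#include "include/Context.h"                      // C_SaferCond
#include "librbd/ImageCtx.h"
#include "librbd/managed_lock/BreakRequest.h"
#include "librbd/managed_lock/GetLockerRequest.h"
#include "librbd/managed_lock/Types.h"

// Hypothetical helper: returns 0 if the lock was broken or absent, <0 on error.
int example_break_stale_lock(librbd::ImageCtx &image_ctx) {
  librbd::managed_lock::Locker locker;

  // Step 1: fetch the current exclusive locker (entity, cookie, address).
  C_SaferCond get_ctx;
  librbd::managed_lock::GetLockerRequest<>::create(
    image_ctx, &locker, &get_ctx)->send();
  int r = get_ctx.wait();
  if (r == -ENOENT) {
    return 0;        // nobody holds the lock
  } else if (r < 0) {
    return r;        // e.g. -EBUSY for an externally managed or shared lock
  }

  // Step 2: blacklist the owner and break its lock, but only if its watch is
  // no longer registered (blacklist_locker=true, force_break_lock=false).
  C_SaferCond break_ctx;
  librbd::managed_lock::BreakRequest<>::create(
    image_ctx, locker, true, false, &break_ctx)->send();
  return break_ctx.wait();  // -EAGAIN if the lock owner is still alive
}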