Journal.cc
LibrbdAdminSocketHook.cc
LibrbdWriteback.cc
++ ManagedLock.cc
MirroringWatcher.cc
ObjectMap.cc
- ObjectWatcher.cc
Operations.cc
Utils.cc
cache/ImageWriteback.cc
cache/PassthroughImageCache.cc
- exclusive_lock/AcquireRequest.cc
+ Watcher.cc
+ watcher/Types.cc
+ watcher/RewatchRequest.cc
+ managed_lock/AcquireRequest.cc
++ managed_lock/BreakRequest.cc
++ managed_lock/GetLockerRequest.cc
+ managed_lock/ReleaseRequest.cc
+ managed_lock/ReacquireRequest.cc
- ManagedLock.cc
+ exclusive_lock/AutomaticPolicy.cc
- exclusive_lock/BreakRequest.cc
- exclusive_lock/GetLockerRequest.cc
- exclusive_lock/ReacquireRequest.cc
- exclusive_lock/ReleaseRequest.cc
+ exclusive_lock/PreAcquireRequest.cc
+ exclusive_lock/PostAcquireRequest.cc
+ exclusive_lock/PreReleaseRequest.cc
- exclusive_lock/AutomaticPolicy.cc
exclusive_lock/StandardPolicy.cc
image/CloseRequest.cc
image/CreateRequest.cc
template <typename I>
void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
- ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
+ ldout(m_image_ctx.cct, 10) << dendl;
- {
- Mutex::Locker locker(m_lock);
- assert(!is_shutdown());
- execute_action(ACTION_SHUT_DOWN, on_shut_down);
- }
+ ML<I>::shut_down(on_shut_down);
// if stalled in request state machine -- abort
- handle_peer_notification();
+ handle_peer_notification(0);
}
template <typename I>
- void ExclusiveLock<I>::try_lock(Context *on_tried_lock) {
- int r = 0;
- {
- Mutex::Locker locker(m_lock);
- assert(m_image_ctx.owner_lock.is_locked());
- if (is_shutdown()) {
- r = -ESHUTDOWN;
- } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
- ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
- execute_action(ACTION_TRY_LOCK, on_tried_lock);
- return;
- }
-void ExclusiveLock<I>::handle_peer_notification() {
++void ExclusiveLock<I>::handle_peer_notification(int r) {
+ Mutex::Locker locker(ML<I>::m_lock);
+ if (ML<I>::m_state != ML<I>::STATE_WAITING_FOR_LOCK) {
+ return;
}
- on_tried_lock->complete(r);
- }
+ ldout(m_image_ctx.cct, 10) << dendl;
+ assert(ML<I>::get_active_action() == ML<I>::ACTION_ACQUIRE_LOCK);
+
- template <typename I>
- void ExclusiveLock<I>::request_lock(Context *on_locked) {
- int r = 0;
- {
- Mutex::Locker locker(m_lock);
- assert(m_image_ctx.owner_lock.is_locked());
- if (is_shutdown()) {
- r = -ESHUTDOWN;
- } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
- ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
- execute_action(ACTION_REQUEST_LOCK, on_locked);
- return;
- }
- }
-
- if (on_locked != nullptr) {
- on_locked->complete(r);
- }
++ m_acquire_lock_peer_ret_val = r;
+ ML<I>::execute_next_action();
}
template <typename I>
}
template <typename I>
- void ExclusiveLock<I>::handle_peer_notification(int r) {
+ void ExclusiveLock<I>::pre_acquire_lock_handler(Context *on_finish) {
++ int acquire_lock_peer_ret_val = 0;
+ {
- Mutex::Locker locker(m_lock);
- if (m_state != STATE_WAITING_FOR_PEER) {
- return;
- }
-
- ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
- assert(get_active_action() == ACTION_REQUEST_LOCK);
++ Mutex::Locker locker(ML<I>::m_lock);
++ std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val);
++ }
+
- if (r >= 0) {
- execute_next_action();
- return;
- }
++ if (acquire_lock_peer_ret_val == -EROFS) {
++ ldout(m_image_ctx.cct, 10) << ": peer nacked lock request" << dendl;
++ on_finish->complete(acquire_lock_peer_ret_val);
++ return;
+ }
+
- handle_acquire_lock(r);
+ PreAcquireRequest<I> *req = PreAcquireRequest<I>::create(m_image_ctx,
+ on_finish);
+ m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) {
+ req->send();
+ }));
}
template <typename I>
- std::string ExclusiveLock<I>::encode_lock_cookie() const {
- assert(m_lock.is_locked());
+ void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
+ ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;
- assert(m_watch_handle != 0);
- std::ostringstream ss;
- ss << WATCHER_LOCK_COOKIE_PREFIX << " " << m_watch_handle;
- return ss.str();
- }
--
- template <typename I>
- bool ExclusiveLock<I>::decode_lock_cookie(const std::string &tag,
- uint64_t *handle) {
- std::string prefix;
- std::istringstream ss(tag);
- if (!(ss >> prefix >> *handle) || prefix != WATCHER_LOCK_COOKIE_PREFIX) {
- return false;
- }
- return true;
- }
- if (r < 0) {
++ if (r == -EROFS) {
++ // peer refused to release the exclusive lock
++ on_finish->complete(r);
++ return;
++ } else if (r < 0) {
+ ML<I>::m_lock.Lock();
+ assert(ML<I>::m_state == ML<I>::STATE_ACQUIRING);
- template <typename I>
- bool ExclusiveLock<I>::is_transition_state() const {
- switch (m_state) {
- case STATE_INITIALIZING:
- case STATE_ACQUIRING:
- case STATE_WAITING_FOR_PEER:
- case STATE_WAITING_FOR_REGISTER:
- case STATE_POST_ACQUIRING:
- case STATE_REACQUIRING:
- case STATE_PRE_RELEASING:
- case STATE_RELEASING:
- case STATE_PRE_SHUTTING_DOWN:
- case STATE_SHUTTING_DOWN:
- return true;
- case STATE_UNINITIALIZED:
- case STATE_UNLOCKED:
- case STATE_LOCKED:
- case STATE_SHUTDOWN:
- break;
- }
- return false;
- }
+ // PostAcquire state machine will not run, so we need complete prepare
+ m_image_ctx.state->handle_prepare_lock_complete();
- template <typename I>
- void ExclusiveLock<I>::append_context(Action action, Context *ctx) {
- assert(m_lock.is_locked());
+ typename ML<I>::Action action = ML<I>::get_active_action();
+ if (action == ML<I>::ACTION_ACQUIRE_LOCK && r < 0 && r != -EBLACKLISTED) {
+ ML<I>::m_state = ML<I>::STATE_WAITING_FOR_LOCK;
+ ML<I>::m_lock.Unlock();
- for (auto &action_ctxs : m_actions_contexts) {
- if (action == action_ctxs.first) {
- if (ctx != nullptr) {
- action_ctxs.second.push_back(ctx);
- }
+ // request the lock from a peer
+ m_image_ctx.image_watcher->notify_request_lock();
-
return;
}
- }
-
- Contexts contexts;
- if (ctx != nullptr) {
- contexts.push_back(ctx);
- }
- m_actions_contexts.push_back({action, std::move(contexts)});
- }
-
- template <typename I>
- void ExclusiveLock<I>::execute_action(Action action, Context *ctx) {
- assert(m_lock.is_locked());
-
- append_context(action, ctx);
- if (!is_transition_state()) {
- execute_next_action();
- }
- }
-
- template <typename I>
- void ExclusiveLock<I>::execute_next_action() {
- assert(m_lock.is_locked());
- assert(!m_actions_contexts.empty());
- switch (get_active_action()) {
- case ACTION_TRY_LOCK:
- case ACTION_REQUEST_LOCK:
- send_acquire_lock();
- break;
- case ACTION_REACQUIRE_LOCK:
- send_reacquire_lock();
- break;
- case ACTION_RELEASE_LOCK:
- send_release_lock();
- break;
- case ACTION_SHUT_DOWN:
- send_shutdown();
- break;
- default:
- assert(false);
- break;
- }
- }
-
- template <typename I>
- typename ExclusiveLock<I>::Action ExclusiveLock<I>::get_active_action() const {
- assert(m_lock.is_locked());
- assert(!m_actions_contexts.empty());
- return m_actions_contexts.front().first;
- }
-
- template <typename I>
- void ExclusiveLock<I>::complete_active_action(State next_state, int r) {
- assert(m_lock.is_locked());
- assert(!m_actions_contexts.empty());
-
- ActionContexts action_contexts(std::move(m_actions_contexts.front()));
- m_actions_contexts.pop_front();
- m_state = next_state;
-
- m_lock.Unlock();
- for (auto ctx : action_contexts.second) {
- ctx->complete(r);
- }
- m_lock.Lock();
-
- if (!is_transition_state() && !m_actions_contexts.empty()) {
- execute_next_action();
- }
- }
-
- template <typename I>
- bool ExclusiveLock<I>::is_shutdown() const {
- assert(m_lock.is_locked());
-
- return ((m_state == STATE_SHUTDOWN) ||
- (!m_actions_contexts.empty() &&
- m_actions_contexts.back().first == ACTION_SHUT_DOWN));
- }
-
- template <typename I>
- void ExclusiveLock<I>::handle_init_complete() {
- ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
- Mutex::Locker locker(m_lock);
- m_state = STATE_UNLOCKED;
- }
-
- template <typename I>
- void ExclusiveLock<I>::send_acquire_lock() {
- assert(m_lock.is_locked());
- if (m_state == STATE_LOCKED) {
- complete_active_action(STATE_LOCKED, 0);
- return;
- }
-
- CephContext *cct = m_image_ctx.cct;
- ldout(cct, 10) << this << " " << __func__ << dendl;
- m_state = STATE_ACQUIRING;
+ ML<I>::m_lock.Unlock();
+ if (r == -EAGAIN) {
+ r = 0;
+ }
- m_watch_handle = m_image_ctx.image_watcher->get_watch_handle();
- if (m_watch_handle == 0) {
- lderr(cct) << "image watcher not registered - delaying request" << dendl;
- m_state = STATE_WAITING_FOR_REGISTER;
+ on_finish->complete(r);
return;
}
void init(uint64_t features, Context *on_init);
void shut_down(Context *on_shutdown);
- void try_lock(Context *on_tried_lock);
- void request_lock(Context *on_locked);
- void release_lock(Context *on_released);
-
- void reacquire_lock(Context *on_reacquired = nullptr);
-
- void handle_peer_notification();
+ void handle_peer_notification(int r);
- static bool decode_lock_cookie(const std::string &cookie, uint64_t *handle);
+ protected:
+ virtual void shutdown_handler(int r, Context *on_finish);
+ virtual void pre_acquire_lock_handler(Context *on_finish);
+ virtual void post_acquire_lock_handler(int r, Context *on_finish);
+ virtual void pre_release_lock_handler(bool shutting_down,
+ Context *on_finish);
+ virtual void post_release_lock_handler(bool shutting_down, int r,
+ Context *on_finish);
private:
uint32_t m_request_blocked_count = 0;
int m_request_blocked_ret_val = 0;
- std::string encode_lock_cookie() const;
-
- bool is_transition_state() const;
-
- void append_context(Action action, Context *ctx);
- void execute_action(Action action, Context *ctx);
- void execute_next_action();
-
- Action get_active_action() const;
- void complete_active_action(State next_state, int r);
-
- bool is_shutdown() const;
++ int m_acquire_lock_peer_ret_val = 0;
+
void handle_init_complete();
-
- void send_acquire_lock();
- void handle_acquiring_lock(int r);
- void handle_acquire_lock(int r);
-
- void send_reacquire_lock();
- void handle_reacquire_lock(int r);
-
- void send_release_lock();
- void handle_releasing_lock(int r);
- void handle_release_lock(int r);
-
- void send_shutdown();
- void send_shutdown_release();
- void handle_shutdown_releasing(int r);
- void handle_shutdown_released(int r);
- void handle_shutdown(int r);
- void complete_shutdown(int r);
+ void handle_post_acquiring_lock(int r);
+ void handle_post_acquired_lock(int r);
+ void handle_pre_releasing_lock(int r);
};
} // namespace librbd
--- /dev/null
-template <typename I>
-void ManagedLock<I>::assert_locked(librados::ObjectWriteOperation *op,
- ClsLockType type) {
- Mutex::Locker locker(m_lock);
- rados::cls::lock::assert_locked(op, RBD_LOCK_NAME, type, m_cookie,
- WATCHER_LOCK_TAG);
-}
-
+ // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+ // vim: ts=8 sw=2 smarttab
+
+ #include "librbd/ManagedLock.h"
+ #include "librbd/managed_lock/AcquireRequest.h"
+ #include "librbd/managed_lock/ReleaseRequest.h"
+ #include "librbd/managed_lock/ReacquireRequest.h"
+ #include "librbd/Watcher.h"
+ #include "librbd/ImageCtx.h"
+ #include "cls/lock/cls_lock_client.h"
+ #include "common/dout.h"
+ #include "common/errno.h"
+ #include "common/WorkQueue.h"
+ #include "librbd/Utils.h"
+ #include <sstream>
+
+ #define dout_subsys ceph_subsys_rbd
+ #undef dout_prefix
+ #define dout_prefix *_dout << "librbd::ManagedLock: " << this << " " << __func__
+
+ namespace librbd {
+
+ using std::string;
+
+ namespace {
+
+ const std::string WATCHER_LOCK_COOKIE_PREFIX = "auto";
+
+ template <typename R>
+ struct C_SendLockRequest : public Context {
+ R* request;
+ explicit C_SendLockRequest(R* request) : request(request) {
+ }
+ virtual void finish(int r) override {
+ request->send();
+ }
+ };
+
+ } // anonymous namespace
+
+ template <typename I>
+ const std::string ManagedLock<I>::WATCHER_LOCK_TAG("internal");
+
+ template <typename I>
+ ManagedLock<I>::ManagedLock(librados::IoCtx &ioctx, ContextWQ *work_queue,
+ const string& oid, Watcher *watcher)
+ : m_lock(util::unique_lock_name("librbd::ManagedLock<I>::m_lock", this)),
+ m_state(STATE_UNLOCKED),
+ m_ioctx(ioctx), m_cct(reinterpret_cast<CephContext *>(ioctx.cct())),
+ m_work_queue(work_queue),
+ m_oid(oid),
+ m_watcher(watcher) {
+ }
+
+ template <typename I>
+ ManagedLock<I>::~ManagedLock() {
+ Mutex::Locker locker(m_lock);
+ assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED ||
+ m_state == STATE_UNINITIALIZED);
+ }
+
+ template <typename I>
+ bool ManagedLock<I>::is_lock_owner() const {
+ Mutex::Locker locker(m_lock);
+
+ bool lock_owner;
+
+ switch (m_state) {
+ case STATE_LOCKED:
+ case STATE_REACQUIRING:
+ case STATE_PRE_SHUTTING_DOWN:
+ case STATE_POST_ACQUIRING:
+ case STATE_PRE_RELEASING:
+ lock_owner = true;
+ break;
+ default:
+ lock_owner = false;
+ break;
+ }
+
+ ldout(m_cct, 20) << "=" << lock_owner << dendl;
+ return lock_owner;
+ }
+
+ template <typename I>
+ void ManagedLock<I>::shut_down(Context *on_shut_down) {
+ ldout(m_cct, 10) << dendl;
+
+ Mutex::Locker locker(m_lock);
+ assert(!is_shutdown_locked());
+ execute_action(ACTION_SHUT_DOWN, on_shut_down);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::acquire_lock(Context *on_acquired) {
+ int r = 0;
+ {
+ Mutex::Locker locker(m_lock);
+ if (is_shutdown_locked()) {
+ r = -ESHUTDOWN;
+ } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
+ ldout(m_cct, 10) << dendl;
+ execute_action(ACTION_ACQUIRE_LOCK, on_acquired);
+ return;
+ }
+ }
+
+ on_acquired->complete(r);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::try_acquire_lock(Context *on_acquired) {
+ int r = 0;
+ {
+ Mutex::Locker locker(m_lock);
+ if (is_shutdown_locked()) {
+ r = -ESHUTDOWN;
+ } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
+ ldout(m_cct, 10) << dendl;
+ execute_action(ACTION_TRY_LOCK, on_acquired);
+ return;
+ }
+ }
+
+ on_acquired->complete(r);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::release_lock(Context *on_released) {
+ int r = 0;
+ {
+ Mutex::Locker locker(m_lock);
+ if (is_shutdown_locked()) {
+ r = -ESHUTDOWN;
+ } else if (m_state != STATE_UNLOCKED || !m_actions_contexts.empty()) {
+ ldout(m_cct, 10) << dendl;
+ execute_action(ACTION_RELEASE_LOCK, on_released);
+ return;
+ }
+ }
+
+ on_released->complete(r);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::reacquire_lock(Context *on_reacquired) {
+ {
+ Mutex::Locker locker(m_lock);
+
+ if (m_state == STATE_WAITING_FOR_REGISTER) {
+ // restart the acquire lock process now that watch is valid
+ ldout(m_cct, 10) << ": " << "woke up waiting acquire" << dendl;
+ Action active_action = get_active_action();
+ assert(active_action == ACTION_TRY_LOCK ||
+ active_action == ACTION_ACQUIRE_LOCK);
+ execute_next_action();
+ } else if (!is_shutdown_locked() &&
+ (m_state == STATE_LOCKED ||
+ m_state == STATE_ACQUIRING ||
+ m_state == STATE_POST_ACQUIRING ||
+ m_state == STATE_WAITING_FOR_LOCK)) {
+ // interlock the lock operation with other state ops
+ ldout(m_cct, 10) << dendl;
+ execute_action(ACTION_REACQUIRE_LOCK, on_reacquired);
+ return;
+ }
+ }
+
+ // ignore request if shutdown or not in a locked-related state
+ if (on_reacquired != nullptr) {
+ on_reacquired->complete(0);
+ }
+ }
+
+ template <typename I>
+ void ManagedLock<I>::shutdown_handler(int r, Context *on_finish) {
+ on_finish->complete(r);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::pre_acquire_lock_handler(Context *on_finish) {
+ on_finish->complete(0);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
+ on_finish->complete(r);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::pre_release_lock_handler(bool shutting_down,
+ Context *on_finish) {
+ {
+ Mutex::Locker locker(m_lock);
+ m_state = shutting_down ? STATE_SHUTTING_DOWN : STATE_RELEASING;
+ }
+ on_finish->complete(0);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::post_release_lock_handler(bool shutting_down, int r,
+ Context *on_finish) {
+ on_finish->complete(r);
+ }
+
+ template <typename I>
+ bool ManagedLock<I>::decode_lock_cookie(const std::string &tag,
+ uint64_t *handle) {
+ std::string prefix;
+ std::istringstream ss(tag);
+ if (!(ss >> prefix >> *handle) || prefix != WATCHER_LOCK_COOKIE_PREFIX) {
+ return false;
+ }
+ return true;
+ }
+
+ template <typename I>
+ string ManagedLock<I>::encode_lock_cookie(uint64_t watch_handle) {
+ assert(watch_handle != 0);
+ std::ostringstream ss;
+ ss << WATCHER_LOCK_COOKIE_PREFIX << " " << watch_handle;
+ return ss.str();
+ }
+
+ template <typename I>
+ bool ManagedLock<I>::is_transition_state() const {
+ switch (m_state) {
+ case STATE_ACQUIRING:
+ case STATE_WAITING_FOR_REGISTER:
+ case STATE_REACQUIRING:
+ case STATE_RELEASING:
+ case STATE_PRE_SHUTTING_DOWN:
+ case STATE_SHUTTING_DOWN:
+ case STATE_INITIALIZING:
+ case STATE_WAITING_FOR_LOCK:
+ case STATE_POST_ACQUIRING:
+ case STATE_PRE_RELEASING:
+ return true;
+ case STATE_UNLOCKED:
+ case STATE_LOCKED:
+ case STATE_SHUTDOWN:
+ case STATE_UNINITIALIZED:
+ break;
+ }
+ return false;
+ }
+
+ template <typename I>
+ void ManagedLock<I>::append_context(Action action, Context *ctx) {
+ assert(m_lock.is_locked());
+
+ for (auto &action_ctxs : m_actions_contexts) {
+ if (action == action_ctxs.first) {
+ if (ctx != nullptr) {
+ action_ctxs.second.push_back(ctx);
+ }
+ return;
+ }
+ }
+
+ Contexts contexts;
+ if (ctx != nullptr) {
+ contexts.push_back(ctx);
+ }
+ m_actions_contexts.push_back({action, std::move(contexts)});
+ }
+
+ template <typename I>
+ void ManagedLock<I>::execute_action(Action action, Context *ctx) {
+ assert(m_lock.is_locked());
+
+ append_context(action, ctx);
+ if (!is_transition_state()) {
+ execute_next_action();
+ }
+ }
+
+ template <typename I>
+ void ManagedLock<I>::execute_next_action() {
+ assert(m_lock.is_locked());
+ assert(!m_actions_contexts.empty());
+ switch (get_active_action()) {
+ case ACTION_ACQUIRE_LOCK:
+ case ACTION_TRY_LOCK:
+ send_acquire_lock();
+ break;
+ case ACTION_REACQUIRE_LOCK:
+ send_reacquire_lock();
+ break;
+ case ACTION_RELEASE_LOCK:
+ send_release_lock();
+ break;
+ case ACTION_SHUT_DOWN:
+ send_shutdown();
+ break;
+ default:
+ assert(false);
+ break;
+ }
+ }
+
+ template <typename I>
+ typename ManagedLock<I>::Action ManagedLock<I>::get_active_action() const {
+ assert(m_lock.is_locked());
+ assert(!m_actions_contexts.empty());
+ return m_actions_contexts.front().first;
+ }
+
+ template <typename I>
+ void ManagedLock<I>::complete_active_action(State next_state, int r) {
+ assert(m_lock.is_locked());
+ assert(!m_actions_contexts.empty());
+
+ ActionContexts action_contexts(std::move(m_actions_contexts.front()));
+ m_actions_contexts.pop_front();
+ m_state = next_state;
+
+ m_lock.Unlock();
+ for (auto ctx : action_contexts.second) {
+ ctx->complete(r);
+ }
+ m_lock.Lock();
+
+ if (!is_transition_state() && !m_actions_contexts.empty()) {
+ execute_next_action();
+ }
+ }
+
+ template <typename I>
+ bool ManagedLock<I>::is_shutdown_locked() const {
+ assert(m_lock.is_locked());
+
+ return ((m_state == STATE_SHUTDOWN) ||
+ (!m_actions_contexts.empty() &&
+ m_actions_contexts.back().first == ACTION_SHUT_DOWN));
+ }
+
+ template <typename I>
+ void ManagedLock<I>::send_acquire_lock() {
+ assert(m_lock.is_locked());
+ if (m_state == STATE_LOCKED) {
+ complete_active_action(STATE_LOCKED, 0);
+ return;
+ }
+
+ ldout(m_cct, 10) << dendl;
+ m_state = STATE_ACQUIRING;
+
+ uint64_t watch_handle = m_watcher->get_watch_handle();
+ if (watch_handle == 0) {
+ lderr(m_cct) << "watcher not registered - delaying request" << dendl;
+ m_state = STATE_WAITING_FOR_REGISTER;
+ return;
+ }
+ m_cookie = ManagedLock<I>::encode_lock_cookie(watch_handle);
+
+ m_work_queue->queue(new FunctionContext([this](int r) {
+ pre_acquire_lock_handler(util::create_context_callback<
+ ManagedLock<I>, &ManagedLock<I>::handle_pre_acquire_lock>(this));
+ }));
+ }
+
+ template <typename I>
+ void ManagedLock<I>::handle_pre_acquire_lock(int r) {
+ ldout(m_cct, 10) << ": r=" << r << dendl;
+
+ if (r < 0) {
+ handle_acquire_lock(r);
+ return;
+ }
+
+ using managed_lock::AcquireRequest;
+ AcquireRequest<I>* req = AcquireRequest<I>::create(m_ioctx, m_watcher,
+ m_work_queue, m_oid, m_cookie,
+ util::create_context_callback<
+ ManagedLock<I>, &ManagedLock<I>::handle_acquire_lock>(this));
+ m_work_queue->queue(new C_SendLockRequest<AcquireRequest<I>>(req), 0);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::handle_acquire_lock(int r) {
+ ldout(m_cct, 10) << ": r=" << r << dendl;
+
+ if (r == -EBUSY || r == -EAGAIN) {
+ ldout(m_cct, 5) << ": unable to acquire exclusive lock" << dendl;
+ } else if (r < 0) {
+ lderr(m_cct) << ": failed to acquire exclusive lock:" << cpp_strerror(r)
+ << dendl;
+ } else {
+ ldout(m_cct, 5) << ": successfully acquired exclusive lock" << dendl;
+ }
+
+ m_post_next_state = (r < 0 ? STATE_UNLOCKED : STATE_LOCKED);
+
+ m_work_queue->queue(new FunctionContext([this, r](int ret) {
+ post_acquire_lock_handler(r, util::create_context_callback<
+ ManagedLock<I>, &ManagedLock<I>::handle_post_acquire_lock>(this));
+ }));
+ }
+
+ template <typename I>
+ void ManagedLock<I>::handle_post_acquire_lock(int r) {
+ ldout(m_cct, 10) << ": r=" << r << dendl;
+
+ Mutex::Locker locker(m_lock);
+
+ if (r < 0 && m_post_next_state == STATE_LOCKED) {
+ // release_lock without calling pre and post handlers
+ revert_to_unlock_state(r);
+ } else {
+ complete_active_action(m_post_next_state, r);
+ }
+ }
+
+ template <typename I>
+ void ManagedLock<I>::revert_to_unlock_state(int r) {
+ ldout(m_cct, 10) << ": r=" << r << dendl;
+
+ using managed_lock::ReleaseRequest;
+ ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
+ m_work_queue, m_oid, m_cookie,
+ new FunctionContext([this, r](int ret) {
+ Mutex::Locker locker(m_lock);
+ assert(ret == 0);
+ complete_active_action(STATE_UNLOCKED, r);
+ }));
+ m_work_queue->queue(new C_SendLockRequest<ReleaseRequest<I>>(req));
+ }
+
+ template <typename I>
+ void ManagedLock<I>::send_reacquire_lock() {
+ assert(m_lock.is_locked());
+
+ if (m_state != STATE_LOCKED) {
+ complete_active_action(m_state, 0);
+ return;
+ }
+
+ uint64_t watch_handle = m_watcher->get_watch_handle();
+ if (watch_handle == 0) {
+ // watch (re)failed while recovering
+ lderr(m_cct) << ": aborting reacquire due to invalid watch handle"
+ << dendl;
+ complete_active_action(STATE_LOCKED, 0);
+ return;
+ }
+
+ m_new_cookie = ManagedLock<I>::encode_lock_cookie(watch_handle);
+ if (m_cookie == m_new_cookie) {
+ ldout(m_cct, 10) << ": skipping reacquire since cookie still valid"
+ << dendl;
+ complete_active_action(STATE_LOCKED, 0);
+ return;
+ }
+
+ ldout(m_cct, 10) << dendl;
+ m_state = STATE_REACQUIRING;
+
+ using managed_lock::ReacquireRequest;
+ ReacquireRequest<I>* req = ReacquireRequest<I>::create(m_ioctx, m_oid,
+ m_cookie, m_new_cookie,
+ util::create_context_callback<
+ ManagedLock, &ManagedLock<I>::handle_reacquire_lock>(this));
+ m_work_queue->queue(new C_SendLockRequest<ReacquireRequest<I>>(req));
+ }
+
+ template <typename I>
+ void ManagedLock<I>::handle_reacquire_lock(int r) {
+ ldout(m_cct, 10) << ": r=" << r << dendl;
+
+ Mutex::Locker locker(m_lock);
+ assert(m_state == STATE_REACQUIRING);
+
+ if (r < 0) {
+ if (r == -EOPNOTSUPP) {
+ ldout(m_cct, 10) << ": updating lock is not supported" << dendl;
+ } else {
+ lderr(m_cct) << ": failed to update lock cookie: " << cpp_strerror(r)
+ << dendl;
+ }
+
+ if (!is_shutdown_locked()) {
+ // queue a release and re-acquire of the lock since cookie cannot
+ // be updated on older OSDs
+ execute_action(ACTION_RELEASE_LOCK, nullptr);
+
+ assert(!m_actions_contexts.empty());
+ ActionContexts &action_contexts(m_actions_contexts.front());
+
+ // reacquire completes when the request lock completes
+ Contexts contexts;
+ std::swap(contexts, action_contexts.second);
+ if (contexts.empty()) {
+ execute_action(ACTION_ACQUIRE_LOCK, nullptr);
+ } else {
+ for (auto ctx : contexts) {
+ ctx = new FunctionContext([ctx, r](int acquire_ret_val) {
+ if (acquire_ret_val >= 0) {
+ acquire_ret_val = r;
+ }
+ ctx->complete(acquire_ret_val);
+ });
+ execute_action(ACTION_ACQUIRE_LOCK, ctx);
+ }
+ }
+ }
+ } else {
+ m_cookie = m_new_cookie;
+ }
+
+ complete_active_action(STATE_LOCKED, r);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::send_release_lock() {
+ assert(m_lock.is_locked());
+ if (m_state == STATE_UNLOCKED) {
+ complete_active_action(STATE_UNLOCKED, 0);
+ return;
+ }
+
+ ldout(m_cct, 10) << dendl;
+ m_state = STATE_PRE_RELEASING;
+
+ m_work_queue->queue(new FunctionContext([this](int r) {
+ pre_release_lock_handler(false, util::create_context_callback<
+ ManagedLock<I>, &ManagedLock<I>::handle_pre_release_lock>(this));
+ }));
+ }
+
+ template <typename I>
+ void ManagedLock<I>::handle_pre_release_lock(int r) {
+ ldout(m_cct, 10) << ": r=" << r << dendl;
+
+ if (r < 0) {
+ handle_release_lock(r);
+ return;
+ }
+
+ using managed_lock::ReleaseRequest;
+ ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
+ m_work_queue, m_oid, m_cookie,
+ util::create_context_callback<
+ ManagedLock<I>, &ManagedLock<I>::handle_release_lock>(this));
+ m_work_queue->queue(new C_SendLockRequest<ReleaseRequest<I>>(req), 0);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::handle_release_lock(int r) {
+ ldout(m_cct, 10) << ": r=" << r << dendl;
+
+ Mutex::Locker locker(m_lock);
+ assert(m_state == STATE_RELEASING);
+
+ if (r >= 0) {
+ m_cookie = "";
+ }
+
+ m_post_next_state = r < 0 ? STATE_LOCKED : STATE_UNLOCKED;
+
+ m_work_queue->queue(new FunctionContext([this, r](int ret) {
+ post_release_lock_handler(false, r, util::create_context_callback<
+ ManagedLock<I>, &ManagedLock<I>::handle_post_release_lock>(this));
+ }));
+ }
+
+ template <typename I>
+ void ManagedLock<I>::handle_post_release_lock(int r) {
+ ldout(m_cct, 10) << ": r=" << r << dendl;
+
+ Mutex::Locker locker(m_lock);
+ complete_active_action(m_post_next_state, r);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::send_shutdown() {
+ ldout(m_cct, 10) << dendl;
+ assert(m_lock.is_locked());
+ if (m_state == STATE_UNLOCKED) {
+ m_state = STATE_SHUTTING_DOWN;
+ m_work_queue->queue(new FunctionContext([this](int r) {
+ shutdown_handler(r, util::create_context_callback<
+ ManagedLock<I>, &ManagedLock<I>::handle_shutdown>(this));
+ }));
+ return;
+ }
+
+ ldout(m_cct, 10) << dendl;
+ assert(m_state == STATE_LOCKED);
+ m_state = STATE_PRE_SHUTTING_DOWN;
+
+ m_lock.Unlock();
+ m_work_queue->queue(new C_ShutDownRelease(this), 0);
+ m_lock.Lock();
+ }
+
+ template <typename I>
+ void ManagedLock<I>::handle_shutdown(int r) {
+ ldout(m_cct, 10) << ": r=" << r << dendl;
+
+ complete_shutdown(r);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::send_shutdown_release() {
+ ldout(m_cct, 10) << dendl;
+
+ Mutex::Locker locker(m_lock);
+
+ m_work_queue->queue(new FunctionContext([this](int r) {
+ pre_release_lock_handler(true, util::create_context_callback<
+ ManagedLock<I>, &ManagedLock<I>::handle_shutdown_pre_release>(this));
+ }));
+ }
+
+ template <typename I>
+ void ManagedLock<I>::handle_shutdown_pre_release(int r) {
+ ldout(m_cct, 10) << ": r=" << r << dendl;
+
+ std::string cookie;
+ {
+ Mutex::Locker locker(m_lock);
+ cookie = m_cookie;
+ }
+
+ using managed_lock::ReleaseRequest;
+ ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
+ m_work_queue, m_oid, cookie,
+ new FunctionContext([this](int r) {
+ post_release_lock_handler(true, r, util::create_context_callback<
+ ManagedLock<I>, &ManagedLock<I>::handle_shutdown_post_release>(this));
+ }));
+ req->send();
+
+ }
+
+ template <typename I>
+ void ManagedLock<I>::handle_shutdown_post_release(int r) {
+ ldout(m_cct, 10) << ": r=" << r << dendl;
+
+ complete_shutdown(r);
+ }
+
+ template <typename I>
+ void ManagedLock<I>::complete_shutdown(int r) {
+ ldout(m_cct, 10) << ": r=" << r << dendl;
+
+ if (r < 0) {
+ lderr(m_cct) << "failed to shut down lock: " << cpp_strerror(r)
+ << dendl;
+ }
+
+ ActionContexts action_contexts;
+ {
+ Mutex::Locker locker(m_lock);
+ assert(m_lock.is_locked());
+ assert(m_actions_contexts.size() == 1);
+
+ action_contexts = std::move(m_actions_contexts.front());
+ m_actions_contexts.pop_front();
+ m_state = STATE_SHUTDOWN;
+ }
+
+ // expect to be destroyed after firing callback
+ for (auto ctx : action_contexts.second) {
+ ctx->complete(r);
+ }
+ }
+
+ } // namespace librbd
+
+ template class librbd::ManagedLock<librbd::ImageCtx>;
--- /dev/null
-template <typename ImageCtxT>
+ // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+ // vim: ts=8 sw=2 smarttab
+
+ #ifndef CEPH_LIBRBD_MANAGED_LOCK_H
+ #define CEPH_LIBRBD_MANAGED_LOCK_H
+
+ #include "include/int_types.h"
+ #include "include/Context.h"
+ #include "include/rados/librados.hpp"
+ #include "cls/lock/cls_lock_types.h"
+ #include "librbd/watcher/Types.h"
+ #include "common/Mutex.h"
+ #include <list>
+ #include <string>
+ #include <utility>
+
+ class ContextWQ;
+
+ namespace librbd {
+
- void assert_locked(librados::ObjectWriteOperation *op, ClsLockType type);
-
++struct ImageCtx;
++
++template <typename ImageCtxT = librbd::ImageCtx>
+ class ManagedLock {
+ private:
+ typedef watcher::Traits<ImageCtxT> TypeTraits;
+ typedef typename TypeTraits::Watcher Watcher;
+
+ public:
+ static const std::string WATCHER_LOCK_TAG;
+
+ static ManagedLock *create(librados::IoCtx& ioctx, ContextWQ *work_queue,
+ const std::string& oid, Watcher *watcher) {
+ return new ManagedLock(ioctx, work_queue, oid, watcher);
+ }
+
+ ManagedLock(librados::IoCtx& ioctx, ContextWQ *work_queue,
+ const std::string& oid, Watcher *watcher);
+ virtual ~ManagedLock();
+
+ bool is_lock_owner() const;
+
+ void shut_down(Context *on_shutdown);
+ void acquire_lock(Context *on_acquired);
+ void try_acquire_lock(Context *on_acquired);
+ void release_lock(Context *on_released);
+ void reacquire_lock(Context *on_reacquired = nullptr);
+
+ bool is_shutdown() const {
+ Mutex::Locker l(m_lock);
+ return is_shutdown_locked();
+ }
+
+ bool is_locked_state() const {
+ return m_state == STATE_LOCKED;
+ }
+
+ static bool decode_lock_cookie(const std::string &tag, uint64_t *handle);
+
+ protected:
+
+ /**
+ * @verbatim
+ *
+ * <start>
+ * |
+ * |
+ * v (acquire_lock)
+ * UNLOCKED -----------------------------------------> ACQUIRING
+ * ^ |
+ * | |
+ * RELEASING |
+ * | |
+ * | |
+ * | (release_lock) v
+ * PRE_RELEASING <----------------------------------------- LOCKED
+ *
+ * <LOCKED state>
+ * |
+ * v
+ * REACQUIRING -------------------------------------> <finish>
+ * . ^
+ * . |
+ * . . . > <RELEASE action> ---> <ACQUIRE action> ---/
+ *
+ * <UNLOCKED/LOCKED states>
+ * |
+ * |
+ * v
+ * PRE_SHUTTING_DOWN ---> SHUTTING_DOWN ---> SHUTDOWN ---> <finish>
+ *
+ * @endverbatim
+ */
+ enum State {
+ STATE_UNINITIALIZED,
+ STATE_INITIALIZING,
+ STATE_UNLOCKED,
+ STATE_LOCKED,
+ STATE_ACQUIRING,
+ STATE_POST_ACQUIRING,
+ STATE_WAITING_FOR_REGISTER,
+ STATE_WAITING_FOR_LOCK,
+ STATE_REACQUIRING,
+ STATE_PRE_RELEASING,
+ STATE_RELEASING,
+ STATE_PRE_SHUTTING_DOWN,
+ STATE_SHUTTING_DOWN,
+ STATE_SHUTDOWN,
+ };
+
+ enum Action {
+ ACTION_TRY_LOCK,
+ ACTION_ACQUIRE_LOCK,
+ ACTION_REACQUIRE_LOCK,
+ ACTION_RELEASE_LOCK,
+ ACTION_SHUT_DOWN
+ };
+
+ mutable Mutex m_lock;
+ State m_state;
+
+ virtual void shutdown_handler(int r, Context *on_finish);
+ virtual void pre_acquire_lock_handler(Context *on_finish);
+ virtual void post_acquire_lock_handler(int r, Context *on_finish);
+ virtual void pre_release_lock_handler(bool shutting_down,
+ Context *on_finish);
+ virtual void post_release_lock_handler(bool shutting_down, int r,
+ Context *on_finish);
+
+ Action get_active_action() const;
+ bool is_shutdown_locked() const;
+ void execute_next_action();
+
+ private:
+ typedef std::list<Context *> Contexts;
+ typedef std::pair<Action, Contexts> ActionContexts;
+ typedef std::list<ActionContexts> ActionsContexts;
+
+ struct C_ShutDownRelease : public Context {
+ ManagedLock *lock;
+ C_ShutDownRelease(ManagedLock *lock)
+ : lock(lock) {
+ }
+ virtual void finish(int r) override {
+ lock->send_shutdown_release();
+ }
+ };
+
+ librados::IoCtx& m_ioctx;
+ CephContext *m_cct;
+ ContextWQ *m_work_queue;
+ std::string m_oid;
+ Watcher *m_watcher;
+
+ std::string m_cookie;
+ std::string m_new_cookie;
+
+ State m_post_next_state;
+
+ ActionsContexts m_actions_contexts;
+
+ static std::string encode_lock_cookie(uint64_t watch_handle);
+
+ bool is_transition_state() const;
+
+ void append_context(Action action, Context *ctx);
+ void execute_action(Action action, Context *ctx);
+
+ void complete_active_action(State next_state, int r);
+
+
+ void send_acquire_lock();
+ void handle_pre_acquire_lock(int r);
+ void handle_acquire_lock(int r);
+ void handle_post_acquire_lock(int r);
+ void revert_to_unlock_state(int r);
+
+ void send_reacquire_lock();
+ void handle_reacquire_lock(int r);
+
+ void send_release_lock();
+ void handle_pre_release_lock(int r);
+ void handle_release_lock(int r);
+ void handle_post_release_lock(int r);
+
+ void send_shutdown();
+ void handle_shutdown(int r);
+ void send_shutdown_release();
+ void handle_shutdown_pre_release(int r);
+ void handle_shutdown_post_release(int r);
+ void complete_shutdown(int r);
+ };
+
+ } // namespace librbd
+
++extern template class librbd::ManagedLock<librbd::ImageCtx>;
++
+ #endif // CEPH_LIBRBD_MANAGED_LOCK_H
--- /dev/null
- save_result(r);
- if (r < 0) {
+ // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+ // vim: ts=8 sw=2 smarttab
+
+ #include "librbd/exclusive_lock/PreReleaseRequest.h"
+ #include "common/dout.h"
+ #include "common/errno.h"
+ #include "librbd/AioImageRequestWQ.h"
+ #include "librbd/ExclusiveLock.h"
+ #include "librbd/ManagedLock.h"
+ #include "librbd/ImageState.h"
+ #include "librbd/ImageWatcher.h"
+ #include "librbd/Journal.h"
+ #include "librbd/ObjectMap.h"
+ #include "librbd/Utils.h"
+
+ #define dout_subsys ceph_subsys_rbd
+ #undef dout_prefix
+ #define dout_prefix *_dout << "librbd::exclusive_lock::PreReleaseRequest: "
+
+ namespace librbd {
+ namespace exclusive_lock {
+
+ using util::create_async_context_callback;
+ using util::create_context_callback;
+
+ template <typename I>
+ PreReleaseRequest<I>* PreReleaseRequest<I>::create(I &image_ctx,
+ Context *on_releasing,
+ Context *on_finish,
+ bool shutting_down) {
+ return new PreReleaseRequest(image_ctx, on_releasing, on_finish,
+ shutting_down);
+ }
+
+ template <typename I>
+ PreReleaseRequest<I>::PreReleaseRequest(I &image_ctx, Context *on_releasing,
+ Context *on_finish, bool shutting_down)
+ : m_image_ctx(image_ctx), m_on_releasing(on_releasing),
+ m_on_finish(create_async_context_callback(image_ctx, on_finish)),
+ m_shutting_down(shutting_down), m_error_result(0), m_object_map(nullptr),
+ m_journal(nullptr) {
+ }
+
+ template <typename I>
+ PreReleaseRequest<I>::~PreReleaseRequest() {
+ if (!m_shutting_down) {
+ m_image_ctx.state->handle_prepare_lock_complete();
+ }
+ delete m_on_releasing;
+ }
+
+ template <typename I>
+ void PreReleaseRequest<I>::send() {
+ send_prepare_lock();
+ }
+
+ template <typename I>
+ void PreReleaseRequest<I>::send_prepare_lock() {
+ if (m_shutting_down) {
+ send_cancel_op_requests();
+ return;
+ }
+
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << dendl;
+
+ // release the lock if the image is not busy performing other actions
+ Context *ctx = create_context_callback<
+ PreReleaseRequest<I>, &PreReleaseRequest<I>::handle_prepare_lock>(this);
+ m_image_ctx.state->prepare_lock(ctx);
+ }
+
+ template <typename I>
+ void PreReleaseRequest<I>::handle_prepare_lock(int r) {
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << ": r=" << r << dendl;
+
+ send_cancel_op_requests();
+ }
+
+ template <typename I>
+ void PreReleaseRequest<I>::send_cancel_op_requests() {
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << dendl;
+
+ using klass = PreReleaseRequest<I>;
+ Context *ctx = create_context_callback<
+ klass, &klass::handle_cancel_op_requests>(this);
+ m_image_ctx.cancel_async_requests(ctx);
+ }
+
+ template <typename I>
+ void PreReleaseRequest<I>::handle_cancel_op_requests(int r) {
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << ": r=" << r << dendl;
+
+ assert(r == 0);
+
+ send_block_writes();
+ }
+
+ template <typename I>
+ void PreReleaseRequest<I>::send_block_writes() {
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << dendl;
+
+ using klass = PreReleaseRequest<I>;
+ Context *ctx = create_context_callback<
+ klass, &klass::handle_block_writes>(this);
+
+ {
+ RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
+ m_image_ctx.aio_work_queue->set_require_lock_on_read();
+ }
+ m_image_ctx.aio_work_queue->block_writes(ctx);
+ }
+ }
+
+ template <typename I>
+ void PreReleaseRequest<I>::handle_block_writes(int r) {
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << ": r=" << r << dendl;
+
- send_image_flush_notifies();
++ if (r == -EBLACKLISTED) {
++ // allow clean shut down if blacklisted
++ lderr(cct) << "failed to block writes because client is blacklisted"
++ << dendl;
++ } else if (r < 0) {
++ lderr(cct) << "failed to block writes: " << cpp_strerror(r) << dendl;
+ m_image_ctx.aio_work_queue->unblock_writes();
++ save_result(r);
+ finish();
+ return;
+ }
+
-void PreReleaseRequest<I>::send_image_flush_notifies() {
++ send_invalidate_cache(false);
+ }
+
+ template <typename I>
- create_context_callback<klass, &klass::handle_image_flush_notifies>(this);
++void PreReleaseRequest<I>::send_invalidate_cache(bool purge_on_error) {
++ if (m_image_ctx.object_cacher == nullptr) {
++ send_flush_notifies();
++ return;
++ }
++
++ CephContext *cct = m_image_ctx.cct;
++ ldout(cct, 10) << __func__ << ": purge_on_error=" << purge_on_error << dendl;
++
++ RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
++ Context *ctx = create_async_context_callback(
++ m_image_ctx, create_context_callback<
++ PreReleaseRequest<I>,
++ &PreReleaseRequest<I>::handle_invalidate_cache>(this));
++ m_image_ctx.invalidate_cache(purge_on_error, ctx);
++}
++
++template <typename I>
++void PreReleaseRequest<I>::handle_invalidate_cache(int r) {
++ CephContext *cct = m_image_ctx.cct;
++ ldout(cct, 10) << __func__ << ": r=" << r << dendl;
++
++ if (r == -EBLACKLISTED) {
++ lderr(cct) << "failed to invalidate cache because client is blacklisted"
++ << dendl;
++ if (!m_image_ctx.is_cache_empty()) {
++      // force purge the cache after being blacklisted
++ send_invalidate_cache(true);
++ return;
++ }
++ } else if (r < 0 && r != -EBUSY) {
++ lderr(cct) << "failed to invalidate cache: " << cpp_strerror(r)
++ << dendl;
++ m_image_ctx.aio_work_queue->unblock_writes();
++ save_result(r);
++ finish();
++ return;
++ }
++
++ send_flush_notifies();
++}
++
++template <typename I>
++void PreReleaseRequest<I>::send_flush_notifies() {
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << dendl;
+
+ using klass = PreReleaseRequest<I>;
+ Context *ctx =
-void PreReleaseRequest<I>::handle_image_flush_notifies(int r) {
++ create_context_callback<klass, &klass::handle_flush_notifies>(this);
+ m_image_ctx.image_watcher->flush(ctx);
+ }
+
+ template <typename I>
++void PreReleaseRequest<I>::handle_flush_notifies(int r) {
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << dendl;
+
+ assert(r == 0);
+ send_close_journal();
+ }
+
+ template <typename I>
+ void PreReleaseRequest<I>::send_close_journal() {
+ {
+ RWLock::WLocker snap_locker(m_image_ctx.snap_lock);
+ std::swap(m_journal, m_image_ctx.journal);
+ }
+
+ if (m_journal == nullptr) {
+ send_close_object_map();
+ return;
+ }
+
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << dendl;
+
+ using klass = PreReleaseRequest<I>;
+ Context *ctx = create_context_callback<klass, &klass::handle_close_journal>(
+ this);
+ m_journal->close(ctx);
+ }
+
+ template <typename I>
+ void PreReleaseRequest<I>::handle_close_journal(int r) {
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << ": r=" << r << dendl;
+
+ if (r < 0) {
+ // error implies some journal events were not flushed -- continue
+ lderr(cct) << "failed to close journal: " << cpp_strerror(r) << dendl;
+ }
+
+ delete m_journal;
+
+ send_close_object_map();
+ }
+
+ template <typename I>
+ void PreReleaseRequest<I>::send_close_object_map() {
+ {
+ RWLock::WLocker snap_locker(m_image_ctx.snap_lock);
+ std::swap(m_object_map, m_image_ctx.object_map);
+ }
+
+ if (m_object_map == nullptr) {
+ send_unlock();
+ return;
+ }
+
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << dendl;
+
+ using klass = PreReleaseRequest<I>;
+ Context *ctx = create_context_callback<
+ klass, &klass::handle_close_object_map>(this);
+ m_object_map->close(ctx);
+ }
+
+ template <typename I>
+ void PreReleaseRequest<I>::handle_close_object_map(int r) {
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << ": r=" << r << dendl;
+
+ // object map shouldn't return errors
+ assert(r == 0);
+ delete m_object_map;
+
+ send_unlock();
+ }
+
+ template <typename I>
+ void PreReleaseRequest<I>::send_unlock() {
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << __func__ << dendl;
+
+ if (m_on_releasing != nullptr) {
+ // alert caller that we no longer own the exclusive lock
+ m_on_releasing->complete(0);
+ m_on_releasing = nullptr;
+ }
+
+ finish();
+ }
+
+ template <typename I>
+ void PreReleaseRequest<I>::finish() {
+ m_on_finish->complete(m_error_result);
+ delete this;
+ }
+
+ } // namespace exclusive_lock
+ } // namespace librbd
+
+ template class librbd::exclusive_lock::PreReleaseRequest<librbd::ImageCtx>;
--- /dev/null
- void send_image_flush_notifies();
- void handle_image_flush_notifies(int r);
+ // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+ // vim: ts=8 sw=2 smarttab
+
+ #ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H
+ #define CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H
+
+ #include "librbd/ImageCtx.h"
+ #include <string>
+
+ class Context;
+
+ namespace librbd {
+
+ struct ImageCtx;
+ template <typename> class ManagedLock;
+ template <typename> class Journal;
+
+ namespace exclusive_lock {
+
+ template <typename ImageCtxT = ImageCtx>
+ class PreReleaseRequest {
+ public:
+ static PreReleaseRequest* create(ImageCtxT &image_ctx, Context *on_releasing,
+ Context *on_finish, bool shutting_down);
+
+ ~PreReleaseRequest();
+ void send();
+
+ private:
+ /**
+ * @verbatim
+ *
+ * <start>
+ * |
+ * v
+ * PREPARE_LOCK
+ * |
+ * v
+ * CANCEL_OP_REQUESTS
+ * |
+ * v
+ * BLOCK_WRITES
+ * |
+ * v
++ * INVALIDATE_CACHE
++ * |
++ * v
+ * FLUSH_NOTIFIES . . . . . . . . . . . . . .
+ * | .
+ * v .
+ * CLOSE_JOURNAL .
+ * | (journal disabled, .
+ * v object map enabled) .
+ * CLOSE_OBJECT_MAP < . . . . . . . . . . . .
+ * | .
+ * v (object map disabled) .
+ * <finish> < . . . . . . . . . . . . . . . . .
+ *
+ * @endverbatim
+ */
+
+ PreReleaseRequest(ImageCtxT &image_ctx, Context *on_releasing,
+ Context *on_finish, bool shutting_down);
+
+ ImageCtxT &m_image_ctx;
+ Context *m_on_releasing;
+ Context *m_on_finish;
+ bool m_shutting_down;
+
+ int m_error_result;
+
+ decltype(m_image_ctx.object_map) m_object_map;
+ decltype(m_image_ctx.journal) m_journal;
+
+ void send_prepare_lock();
+ void handle_prepare_lock(int r);
+
+ void send_cancel_op_requests();
+ void handle_cancel_op_requests(int r);
+
+ void send_block_writes();
+ void handle_block_writes(int r);
+
++ void send_invalidate_cache(bool purge_on_error);
++ void handle_invalidate_cache(int r);
++
++ void send_flush_notifies();
++ void handle_flush_notifies(int r);
+
+ void send_close_journal();
+ void handle_close_journal(int r);
+
+ void send_close_object_map();
+ void handle_close_object_map(int r);
+
+ void send_unlock();
+
+ void finish();
+
+ void save_result(int result) {
+ if (m_error_result == 0 && result < 0) {
+ m_error_result = result;
+ }
+ }
+
+ };
+
+ } // namespace exclusive_lock
+ } // namespace librbd
+
+ #endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_PRE_RELEASE_REQUEST_H
#include "librbd/internal.h"
#include "librbd/Journal.h"
#include "librbd/journal/Types.h"
++#include "librbd/managed_lock/BreakRequest.h"
++#include "librbd/managed_lock/GetLockerRequest.h"
++#include "librbd/managed_lock/Types.h"
#include "librbd/mirror/DisableRequest.h"
#include "librbd/mirror/EnableRequest.h"
#include "librbd/MirroringWatcher.h"
return 0;
}
- exclusive_lock::Locker locker;
+ int lock_get_owners(ImageCtx *ictx, rbd_lock_mode_t *lock_mode,
+ std::list<std::string> *lock_owners)
+ {
+ CephContext *cct = ictx->cct;
+ ldout(cct, 20) << __func__ << ": ictx=" << ictx << dendl;
+
- auto get_owner_req = exclusive_lock::GetLockerRequest<>::create(
++ managed_lock::Locker locker;
+ C_SaferCond get_owner_ctx;
- exclusive_lock::Locker locker;
++ auto get_owner_req = managed_lock::GetLockerRequest<>::create(
+ *ictx, &locker, &get_owner_ctx);
+ get_owner_req->send();
+
+ int r = get_owner_ctx.wait();
+ if (r == -ENOENT) {
+ return r;
+ } else if (r < 0) {
+ lderr(cct) << "failed to determine current lock owner: "
+ << cpp_strerror(r) << dendl;
+ return r;
+ }
+
+ *lock_mode = RBD_LOCK_MODE_EXCLUSIVE;
+ lock_owners->clear();
+ lock_owners->emplace_back(locker.address);
+ return 0;
+ }
+
+ int lock_break(ImageCtx *ictx, rbd_lock_mode_t lock_mode,
+ const std::string &lock_owner)
+ {
+ CephContext *cct = ictx->cct;
+ ldout(cct, 20) << __func__ << ": ictx=" << ictx << ", "
+ << "lock_mode=" << lock_mode << ", "
+ << "lock_owner=" << lock_owner << dendl;
+
+ if (lock_mode != RBD_LOCK_MODE_EXCLUSIVE) {
+ return -EOPNOTSUPP;
+ }
+
- auto get_owner_req = exclusive_lock::GetLockerRequest<>::create(
++ managed_lock::Locker locker;
+ C_SaferCond get_owner_ctx;
- auto break_req = exclusive_lock::BreakRequest<>::create(
++ auto get_owner_req = managed_lock::GetLockerRequest<>::create(
+ *ictx, &locker, &get_owner_ctx);
+ get_owner_req->send();
+
+ int r = get_owner_ctx.wait();
+ if (r == -ENOENT) {
+ return r;
+ } else if (r < 0) {
+ lderr(cct) << "failed to determine current lock owner: "
+ << cpp_strerror(r) << dendl;
+ return r;
+ }
+
+ if (locker.address != lock_owner) {
+ return -EBUSY;
+ }
+
+ C_SaferCond break_ctx;
++ auto break_req = managed_lock::BreakRequest<>::create(
+ *ictx, locker, ictx->blacklist_on_break_lock, true, &break_ctx);
+ break_req->send();
+
+ r = break_ctx.wait();
+ if (r == -ENOENT) {
+ return r;
+ } else if (r < 0) {
+ lderr(cct) << "failed to break lock: " << cpp_strerror(r) << dendl;
+ return r;
+ }
+ return 0;
+ }
+
int remove(IoCtx& io_ctx, const std::string &image_name,
const std::string &image_id, ProgressContext& prog_ctx,
bool force)
--- /dev/null
--- /dev/null
++// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
++// vim: ts=8 sw=2 smarttab
++
++#include "librbd/managed_lock/BreakRequest.h"
++#include "common/dout.h"
++#include "common/errno.h"
++#include "common/WorkQueue.h"
++#include "include/stringify.h"
++#include "cls/lock/cls_lock_client.h"
++#include "cls/lock/cls_lock_types.h"
++#include "librbd/Utils.h"
++#include "librbd/managed_lock/Types.h"
++
++#define dout_subsys ceph_subsys_rbd
++#undef dout_prefix
++#define dout_prefix *_dout << "librbd::managed_lock::BreakRequest: " << this \
++ << " " << __func__ << ": "
++
++namespace librbd {
++namespace managed_lock {
++
++using util::create_context_callback;
++using util::create_rados_ack_callback;
++using util::create_rados_safe_callback;
++
++namespace {
++
++template <typename I>
++struct C_BlacklistClient : public Context {
++ I &image_ctx;
++ std::string locker_address;
++ Context *on_finish;
++
++ C_BlacklistClient(I &image_ctx, const std::string &locker_address,
++ Context *on_finish)
++ : image_ctx(image_ctx), locker_address(locker_address),
++ on_finish(on_finish) {
++ }
++
++ virtual void finish(int r) override {
++ librados::Rados rados(image_ctx.md_ctx);
++ r = rados.blacklist_add(locker_address,
++ image_ctx.blacklist_expire_seconds);
++ on_finish->complete(r);
++ }
++};
++
++} // anonymous namespace
++
++template <typename I>
++void BreakRequest<I>::send() {
++ send_get_watchers();
++}
++
++template <typename I>
++void BreakRequest<I>::send_get_watchers() {
++ CephContext *cct = m_image_ctx.cct;
++ ldout(cct, 10) << dendl;
++
++ librados::ObjectReadOperation op;
++ op.list_watchers(&m_watchers, &m_watchers_ret_val);
++
++ using klass = BreakRequest<I>;
++ librados::AioCompletion *rados_completion =
++ create_rados_ack_callback<klass, &klass::handle_get_watchers>(this);
++ m_out_bl.clear();
++ int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
++ rados_completion, &op, &m_out_bl);
++ assert(r == 0);
++ rados_completion->release();
++}
++
++template <typename I>
++void BreakRequest<I>::handle_get_watchers(int r) {
++ CephContext *cct = m_image_ctx.cct;
++ ldout(cct, 10) << "r=" << r << dendl;
++
++ if (r == 0) {
++ r = m_watchers_ret_val;
++ }
++ if (r < 0) {
++ lderr(cct) << "failed to retrieve watchers: " << cpp_strerror(r)
++ << dendl;
++ finish(r);
++ return;
++ }
++
++ for (auto &watcher : m_watchers) {
++ if ((strncmp(m_locker.address.c_str(),
++ watcher.addr, sizeof(watcher.addr)) == 0) &&
++ (m_locker.handle == watcher.cookie)) {
++ ldout(cct, 10) << "lock owner is still alive" << dendl;
++
++ if (m_force_break_lock) {
++ break;
++ } else {
++ finish(-EAGAIN);
++ return;
++ }
++ }
++ }
++
++ send_blacklist();
++}
++
++template <typename I>
++void BreakRequest<I>::send_blacklist() {
++ if (!m_blacklist_locker) {
++ send_break_lock();
++ return;
++ }
++
++ CephContext *cct = m_image_ctx.cct;
++ ldout(cct, 10) << dendl;
++
++ // TODO: need async version of RadosClient::blacklist_add
++ using klass = BreakRequest<I>;
++ Context *ctx = create_context_callback<klass, &klass::handle_blacklist>(
++ this);
++ m_image_ctx.op_work_queue->queue(new C_BlacklistClient<I>(m_image_ctx,
++ m_locker.address,
++ ctx), 0);
++}
++
++template <typename I>
++void BreakRequest<I>::handle_blacklist(int r) {
++ CephContext *cct = m_image_ctx.cct;
++ ldout(cct, 10) << "r=" << r << dendl;
++
++ if (r < 0) {
++ lderr(cct) << "failed to blacklist lock owner: " << cpp_strerror(r)
++ << dendl;
++ finish(r);
++ return;
++ }
++ send_break_lock();
++}
++
++template <typename I>
++void BreakRequest<I>::send_break_lock() {
++ CephContext *cct = m_image_ctx.cct;
++ ldout(cct, 10) << dendl;
++
++ librados::ObjectWriteOperation op;
++ rados::cls::lock::break_lock(&op, RBD_LOCK_NAME, m_locker.cookie,
++ m_locker.entity);
++
++ using klass = BreakRequest<I>;
++ librados::AioCompletion *rados_completion =
++ create_rados_safe_callback<klass, &klass::handle_break_lock>(this);
++ int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
++ rados_completion, &op);
++ assert(r == 0);
++ rados_completion->release();
++}
++
++template <typename I>
++void BreakRequest<I>::handle_break_lock(int r) {
++ CephContext *cct = m_image_ctx.cct;
++ ldout(cct, 10) << "r=" << r << dendl;
++
++ if (r < 0 && r != -ENOENT) {
++ lderr(cct) << "failed to break lock: " << cpp_strerror(r) << dendl;
++ finish(r);
++ return;
++ }
++
++ finish(0);
++}
++
++template <typename I>
++void BreakRequest<I>::finish(int r) {
++ CephContext *cct = m_image_ctx.cct;
++ ldout(cct, 10) << "r=" << r << dendl;
++
++ m_on_finish->complete(r);
++ delete this;
++}
++
++} // namespace managed_lock
++} // namespace librbd
++
++template class librbd::managed_lock::BreakRequest<librbd::ImageCtx>;
--- /dev/null
--- /dev/null
++// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
++// vim: ts=8 sw=2 smarttab
++
++#ifndef CEPH_LIBRBD_MANAGED_LOCK_BREAK_REQUEST_H
++#define CEPH_LIBRBD_MANAGED_LOCK_BREAK_REQUEST_H
++
++#include "include/int_types.h"
++#include "include/buffer.h"
++#include "msg/msg_types.h"
++#include "librbd/ImageCtx.h"
++#include <list>
++#include <string>
++#include <boost/optional.hpp>
++
++class Context;
++
++namespace librbd {
++
++template <typename> class Journal;
++
++namespace managed_lock {
++
++struct Locker;
++
++template <typename ImageCtxT = ImageCtx>
++class BreakRequest {
++public:
++ static BreakRequest* create(ImageCtxT &image_ctx, const Locker &locker,
++ bool blacklist_locker, bool force_break_lock,
++ Context *on_finish) {
++ return new BreakRequest(image_ctx, locker, blacklist_locker,
++ force_break_lock, on_finish);
++ }
++
++ void send();
++
++private:
++ /**
++ * @verbatim
++ *
++ * <start>
++ * |
++ * v
++ * GET_WATCHERS
++ * |
++ * v
++ * BLACKLIST (skip if disabled)
++ * |
++ * v
++ * BREAK_LOCK
++ * |
++ * v
++ * <finish>
++ *
++ * @endvertbatim
++ */
++
++ ImageCtxT &m_image_ctx;
++ const Locker &m_locker;
++ bool m_blacklist_locker;
++ bool m_force_break_lock;
++ Context *m_on_finish;
++
++ bufferlist m_out_bl;
++
++ std::list<obj_watch_t> m_watchers;
++ int m_watchers_ret_val;
++
++ BreakRequest(ImageCtxT &image_ctx, const Locker &locker,
++ bool blacklist_locker, bool force_break_lock,
++ Context *on_finish)
++ : m_image_ctx(image_ctx), m_locker(locker),
++ m_blacklist_locker(blacklist_locker),
++ m_force_break_lock(force_break_lock), m_on_finish(on_finish) {
++ }
++
++ void send_get_watchers();
++ void handle_get_watchers(int r);
++
++ void send_blacklist();
++ void handle_blacklist(int r);
++
++ void send_break_lock();
++ void handle_break_lock(int r);
++
++ void finish(int r);
++
++};
++
++} // namespace managed_lock
++} // namespace librbd
++
++extern template class librbd::managed_lock::BreakRequest<librbd::ImageCtx>;
++
++#endif // CEPH_LIBRBD_MANAGED_LOCK_BREAK_REQUEST_H
--- /dev/null
--- /dev/null
++// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
++// vim: ts=8 sw=2 smarttab
++
++#include "librbd/managed_lock/GetLockerRequest.h"
++#include "cls/lock/cls_lock_client.h"
++#include "cls/lock/cls_lock_types.h"
++#include "common/dout.h"
++#include "common/errno.h"
++#include "include/stringify.h"
++#include "librbd/ImageCtx.h"
++#include "librbd/ManagedLock.h"
++#include "librbd/Utils.h"
++#include "librbd/managed_lock/Types.h"
++
++#define dout_subsys ceph_subsys_rbd
++#undef dout_prefix
++#define dout_prefix *_dout << "librbd::managed_lock::GetLockerRequest: " \
++ << this << " " << __func__ << ": "
++
++namespace librbd {
++namespace managed_lock {
++
++using util::create_rados_ack_callback;
++
++template <typename I>
++void GetLockerRequest<I>::send() {
++ send_get_lockers();
++}
++
++template <typename I>
++void GetLockerRequest<I>::send_get_lockers() {
++ CephContext *cct = m_image_ctx.cct;
++ ldout(cct, 10) << dendl;
++
++ librados::ObjectReadOperation op;
++ rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME);
++
++ using klass = GetLockerRequest<I>;
++ librados::AioCompletion *rados_completion =
++ create_rados_ack_callback<klass, &klass::handle_get_lockers>(this);
++ m_out_bl.clear();
++ int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
++ rados_completion, &op, &m_out_bl);
++ assert(r == 0);
++ rados_completion->release();
++}
++
++template <typename I>
++void GetLockerRequest<I>::handle_get_lockers(int r) {
++ CephContext *cct = m_image_ctx.cct;
++ ldout(cct, 10) << "r=" << r << dendl;
++
++ std::map<rados::cls::lock::locker_id_t,
++ rados::cls::lock::locker_info_t> lockers;
++ ClsLockType lock_type = LOCK_NONE;
++ std::string lock_tag;
++ if (r == 0) {
++ bufferlist::iterator it = m_out_bl.begin();
++ r = rados::cls::lock::get_lock_info_finish(&it, &lockers,
++ &lock_type, &lock_tag);
++ }
++
++ if (r < 0) {
++ lderr(cct) << "failed to retrieve lockers: " << cpp_strerror(r) << dendl;
++ finish(r);
++ return;
++ }
++
++ if (lockers.empty()) {
++ ldout(cct, 20) << "no lockers detected" << dendl;
++ finish(-ENOENT);
++ return;
++ }
++
++ if (lock_tag != ManagedLock<>::WATCHER_LOCK_TAG) {
++ ldout(cct, 5) <<"locked by external mechanism: tag=" << lock_tag << dendl;
++ finish(-EBUSY);
++ return;
++ }
++
++ if (lock_type == LOCK_SHARED) {
++ ldout(cct, 5) << "shared lock type detected" << dendl;
++ finish(-EBUSY);
++ return;
++ }
++
++ std::map<rados::cls::lock::locker_id_t,
++ rados::cls::lock::locker_info_t>::iterator iter = lockers.begin();
++ if (!ManagedLock<>::decode_lock_cookie(iter->first.cookie,
++ &m_locker->handle)) {
++ ldout(cct, 5) << "locked by external mechanism: "
++ << "cookie=" << iter->first.cookie << dendl;
++ finish(-EBUSY);
++ return;
++ }
++
++ m_locker->entity = iter->first.locker;
++ m_locker->cookie = iter->first.cookie;
++ m_locker->address = stringify(iter->second.addr);
++ if (m_locker->cookie.empty() || m_locker->address.empty()) {
++ ldout(cct, 20) << "no valid lockers detected" << dendl;
++ finish(-ENOENT);
++ return;
++ }
++
++ ldout(cct, 10) << "retrieved exclusive locker: "
++ << m_locker->entity << "@" << m_locker->address << dendl;
++ finish(0);
++}
++
++template <typename I>
++void GetLockerRequest<I>::finish(int r) {
++ CephContext *cct = m_image_ctx.cct;
++ ldout(cct, 10) << "r=" << r << dendl;
++
++ m_on_finish->complete(r);
++ delete this;
++}
++
++} // namespace managed_lock
++} // namespace librbd
++
++template class librbd::managed_lock::GetLockerRequest<librbd::ImageCtx>;
++
--- /dev/null
--- /dev/null
++// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
++// vim: ts=8 sw=2 smarttab
++
++#ifndef CEPH_LIBRBD_MANAGED_LOCK_GET_LOCKER_REQUEST_H
++#define CEPH_LIBRBD_MANAGED_LOCK_GET_LOCKER_REQUEST_H
++
++#include "include/int_types.h"
++#include "include/buffer.h"
++
++class Context;
++
++namespace librbd {
++
++struct ImageCtx;
++
++namespace managed_lock {
++
++struct Locker;
++
++template <typename ImageCtxT = ImageCtx>
++class GetLockerRequest {
++public:
++ static GetLockerRequest* create(ImageCtxT &image_ctx, Locker *locker,
++ Context *on_finish) {
++ return new GetLockerRequest(image_ctx, locker, on_finish);
++ }
++
++ void send();
++
++private:
++ ImageCtxT &m_image_ctx;
++ Locker *m_locker;
++ Context *m_on_finish;
++
++ bufferlist m_out_bl;
++
++ GetLockerRequest(ImageCtxT &image_ctx, Locker *locker, Context *on_finish)
++ : m_image_ctx(image_ctx), m_locker(locker), m_on_finish(on_finish) {
++ }
++
++ void send_get_lockers();
++ void handle_get_lockers(int r);
++
++ void finish(int r);
++
++};
++
++} // namespace managed_lock
++} // namespace librbd
++
++extern template class librbd::managed_lock::GetLockerRequest<librbd::ImageCtx>;
++
++#endif // CEPH_LIBRBD_MANAGED_LOCK_GET_LOCKER_REQUEST_H
--- /dev/null
--- /dev/null
++// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
++// vim: ts=8 sw=2 smarttab
++
++#ifndef CEPH_LIBRBD_MANAGED_LOCK_TYPES_H
++#define CEPH_LIBRBD_MANAGED_LOCK_TYPES_H
++
++#include "msg/msg_types.h"
++#include <string>
++
++namespace librbd {
++namespace managed_lock {
++
++struct Locker {
++ entity_name_t entity;
++ std::string cookie;
++ std::string address;
++ uint64_t handle;
++};
++
++} // namespace managed_lock
++} // namespace librbd
++
++#endif // CEPH_LIBRBD_MANAGED_LOCK_TYPES_H
test_mock_AioImageRequest.cc
test_mock_ExclusiveLock.cc
test_mock_Journal.cc
++ test_mock_ManagedLock.cc
test_mock_ObjectMap.cc
- test_mock_ObjectWatcher.cc
- exclusive_lock/test_mock_AcquireRequest.cc
- exclusive_lock/test_mock_BreakRequest.cc
- exclusive_lock/test_mock_GetLockerRequest.cc
- exclusive_lock/test_mock_ReacquireRequest.cc
- exclusive_lock/test_mock_ReleaseRequest.cc
+ exclusive_lock/test_mock_PreAcquireRequest.cc
+ exclusive_lock/test_mock_PostAcquireRequest.cc
+ exclusive_lock/test_mock_PreReleaseRequest.cc
image/test_mock_RefreshRequest.cc
- image_watcher/test_mock_RewatchRequest.cc
journal/test_mock_OpenRequest.cc
journal/test_mock_PromoteRequest.cc
journal/test_mock_Replay.cc
++ managed_lock/test_mock_AcquireRequest.cc
++ managed_lock/test_mock_BreakRequest.cc
++ managed_lock/test_mock_GetLockerRequest.cc
++ managed_lock/test_mock_ReacquireRequest.cc
++ managed_lock/test_mock_ReleaseRequest.cc
mirror/test_mock_DisableRequest.cc
object_map/test_mock_InvalidateRequest.cc
object_map/test_mock_LockRequest.cc
operation/test_mock_SnapshotRemoveRequest.cc
operation/test_mock_SnapshotRollbackRequest.cc
operation/test_mock_SnapshotUnprotectRequest.cc
- managed_lock/test_mock_ReleaseRequest.cc
- managed_lock/test_mock_ReacquireRequest.cc
- managed_lock/test_mock_AcquireRequest.cc
- test_mock_ManagedLock.cc
+ watcher/test_mock_RewatchRequest.cc
)
add_executable(unittest_librbd
${unittest_librbd_srcs}
--- /dev/null
- MockPreReleaseRequest *req = MockPreReleaseRequest::create(mock_image_ctx,
- &mock_releasing_ctx,
- &ctx, false);
+ // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+ // vim: ts=8 sw=2 smarttab
+
+ #include "test/librbd/test_mock_fixture.h"
+ #include "test/librbd/test_support.h"
+ #include "test/librbd/mock/MockImageCtx.h"
+ #include "test/librbd/mock/MockJournal.h"
+ #include "test/librbd/mock/MockObjectMap.h"
+ #include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
+ #include "librbd/exclusive_lock/PreReleaseRequest.h"
+ #include "gmock/gmock.h"
+ #include "gtest/gtest.h"
+ #include <list>
+
+ // template definitions
+ #include "librbd/exclusive_lock/PreReleaseRequest.cc"
+ template class librbd::exclusive_lock::PreReleaseRequest<librbd::MockImageCtx>;
+
+ namespace librbd {
+
+ using librbd::ManagedLock;
+
+ namespace exclusive_lock {
+
+ namespace {
+
+ struct MockContext : public Context {
+ MOCK_METHOD1(complete, void(int));
+ MOCK_METHOD1(finish, void(int));
+ };
+
+ } // anonymous namespace
+
+ using ::testing::_;
+ using ::testing::InSequence;
+ using ::testing::Invoke;
+ using ::testing::Return;
+ using ::testing::StrEq;
++using ::testing::WithArg;
+
+ static const std::string TEST_COOKIE("auto 123");
+
+ class TestMockExclusiveLockPreReleaseRequest : public TestMockFixture {
+ public:
+ typedef PreReleaseRequest<MockImageCtx> MockPreReleaseRequest;
+
+ void expect_complete_context(MockContext &mock_context, int r) {
+ EXPECT_CALL(mock_context, complete(r));
+ }
+
+ void expect_test_features(MockImageCtx &mock_image_ctx, uint64_t features,
+ bool enabled) {
+ EXPECT_CALL(mock_image_ctx, test_features(features))
+ .WillOnce(Return(enabled));
+ }
+
+ void expect_set_require_lock_on_read(MockImageCtx &mock_image_ctx) {
+ EXPECT_CALL(*mock_image_ctx.aio_work_queue, set_require_lock_on_read());
+ }
+
+ void expect_block_writes(MockImageCtx &mock_image_ctx, int r) {
+ expect_test_features(mock_image_ctx, RBD_FEATURE_JOURNALING,
+ ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0));
+ if ((mock_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
+ expect_set_require_lock_on_read(mock_image_ctx);
+ }
+ EXPECT_CALL(*mock_image_ctx.aio_work_queue, block_writes(_))
+ .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
+ }
+
+ void expect_unblock_writes(MockImageCtx &mock_image_ctx) {
+ EXPECT_CALL(*mock_image_ctx.aio_work_queue, unblock_writes());
+ }
+
+ void expect_cancel_op_requests(MockImageCtx &mock_image_ctx, int r) {
+ EXPECT_CALL(mock_image_ctx, cancel_async_requests(_))
+ .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
+ }
+
+ void expect_close_journal(MockImageCtx &mock_image_ctx,
+ MockJournal &mock_journal, int r) {
+ EXPECT_CALL(mock_journal, close(_))
+ .WillOnce(CompleteContext(r, mock_image_ctx.image_ctx->op_work_queue));
+ }
+
+ void expect_close_object_map(MockImageCtx &mock_image_ctx,
+ MockObjectMap &mock_object_map) {
+ EXPECT_CALL(mock_object_map, close(_))
+ .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));
+ }
+
++ void expect_invalidate_cache(MockImageCtx &mock_image_ctx, bool purge,
++ int r) {
++ if (mock_image_ctx.object_cacher != nullptr) {
++ EXPECT_CALL(mock_image_ctx, invalidate_cache(purge, _))
++ .WillOnce(WithArg<1>(CompleteContext(r, NULL)));
++ }
++ }
++
++ void expect_is_cache_empty(MockImageCtx &mock_image_ctx, bool empty) {
++ if (mock_image_ctx.object_cacher != nullptr) {
++ EXPECT_CALL(mock_image_ctx, is_cache_empty())
++ .WillOnce(Return(empty));
++ }
++ }
++
+ void expect_flush_notifies(MockImageCtx &mock_image_ctx) {
+ EXPECT_CALL(*mock_image_ctx.image_watcher, flush(_))
+ .WillOnce(CompleteContext(0, mock_image_ctx.image_ctx->op_work_queue));
+ }
+
+ void expect_prepare_lock(MockImageCtx &mock_image_ctx) {
+ EXPECT_CALL(*mock_image_ctx.state, prepare_lock(_))
+ .WillOnce(Invoke([](Context *on_ready) {
+ on_ready->complete(0);
+ }));
+ }
+
+ void expect_handle_prepare_lock_complete(MockImageCtx &mock_image_ctx) {
+ EXPECT_CALL(*mock_image_ctx.state, handle_prepare_lock_complete());
+ }
+
+ };
+
+ TEST_F(TestMockExclusiveLockPreReleaseRequest, Success) {
+ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+ librbd::ImageCtx *ictx;
+ ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+ MockImageCtx mock_image_ctx(*ictx);
+ expect_op_work_queue(mock_image_ctx);
+
+ InSequence seq;
+
+ expect_prepare_lock(mock_image_ctx);
+ expect_cancel_op_requests(mock_image_ctx, 0);
+ expect_block_writes(mock_image_ctx, 0);
++ expect_invalidate_cache(mock_image_ctx, false, 0);
+ expect_flush_notifies(mock_image_ctx);
+
+ MockJournal *mock_journal = new MockJournal();
+ mock_image_ctx.journal = mock_journal;
+ expect_close_journal(mock_image_ctx, *mock_journal, -EINVAL);
+
+ MockObjectMap *mock_object_map = new MockObjectMap();
+ mock_image_ctx.object_map = mock_object_map;
+ expect_close_object_map(mock_image_ctx, *mock_object_map);
+
+ MockContext mock_releasing_ctx;
+ expect_complete_context(mock_releasing_ctx, 0);
+ expect_handle_prepare_lock_complete(mock_image_ctx);
+
+ C_SaferCond ctx;
- MockPreReleaseRequest *req = MockPreReleaseRequest::create(mock_image_ctx,
- &release_ctx, &ctx,
- false);
++ MockPreReleaseRequest *req = MockPreReleaseRequest::create(
++ mock_image_ctx, &mock_releasing_ctx, &ctx, false);
+ req->send();
+ ASSERT_EQ(0, ctx.wait());
+ }
+
+ TEST_F(TestMockExclusiveLockPreReleaseRequest, SuccessJournalDisabled) {
+ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+ librbd::ImageCtx *ictx;
+ ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+ MockImageCtx mock_image_ctx(*ictx);
+
+ expect_block_writes(mock_image_ctx, 0);
+ expect_op_work_queue(mock_image_ctx);
+
+ InSequence seq;
+ expect_prepare_lock(mock_image_ctx);
+ expect_cancel_op_requests(mock_image_ctx, 0);
++ expect_invalidate_cache(mock_image_ctx, false, 0);
+ expect_flush_notifies(mock_image_ctx);
+
+ MockObjectMap *mock_object_map = new MockObjectMap();
+ mock_image_ctx.object_map = mock_object_map;
+ expect_close_object_map(mock_image_ctx, *mock_object_map);
+
+ expect_handle_prepare_lock_complete(mock_image_ctx);
+
+ C_SaferCond release_ctx;
+ C_SaferCond ctx;
- MockPreReleaseRequest *req = MockPreReleaseRequest::create(mock_image_ctx,
- &release_ctx, &ctx,
- true);
++ MockPreReleaseRequest *req = MockPreReleaseRequest::create(
++ mock_image_ctx, &release_ctx, &ctx, false);
+ req->send();
+ ASSERT_EQ(0, release_ctx.wait());
+ ASSERT_EQ(0, ctx.wait());
+ }
+
+ TEST_F(TestMockExclusiveLockPreReleaseRequest, SuccessObjectMapDisabled) {
+ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+ librbd::ImageCtx *ictx;
+ ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+ MockImageCtx mock_image_ctx(*ictx);
+
+ expect_block_writes(mock_image_ctx, 0);
+ expect_op_work_queue(mock_image_ctx);
+
+ InSequence seq;
+ expect_cancel_op_requests(mock_image_ctx, 0);
++ expect_invalidate_cache(mock_image_ctx, false, 0);
+ expect_flush_notifies(mock_image_ctx);
+
+ C_SaferCond release_ctx;
+ C_SaferCond ctx;
- MockPreReleaseRequest *req = MockPreReleaseRequest::create(mock_image_ctx,
- nullptr, &ctx,
- true);
++ MockPreReleaseRequest *req = MockPreReleaseRequest::create(
++ mock_image_ctx, &release_ctx, &ctx, true);
+ req->send();
+ ASSERT_EQ(0, release_ctx.wait());
+ ASSERT_EQ(0, ctx.wait());
+ }
+
++TEST_F(TestMockExclusiveLockPreReleaseRequest, Blacklisted) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_prepare_lock(mock_image_ctx);
++ expect_cancel_op_requests(mock_image_ctx, 0);
++ expect_block_writes(mock_image_ctx, -EBLACKLISTED);
++ expect_invalidate_cache(mock_image_ctx, false, -EBLACKLISTED);
++ expect_is_cache_empty(mock_image_ctx, false);
++ expect_invalidate_cache(mock_image_ctx, true, -EBLACKLISTED);
++ expect_is_cache_empty(mock_image_ctx, true);
++ expect_flush_notifies(mock_image_ctx);
++
++ MockJournal *mock_journal = new MockJournal();
++ mock_image_ctx.journal = mock_journal;
++ expect_close_journal(mock_image_ctx, *mock_journal, -EBLACKLISTED);
++
++ MockObjectMap *mock_object_map = new MockObjectMap();
++ mock_image_ctx.object_map = mock_object_map;
++ expect_close_object_map(mock_image_ctx, *mock_object_map);
++
++ MockContext mock_releasing_ctx;
++ expect_complete_context(mock_releasing_ctx, 0);
++ expect_handle_prepare_lock_complete(mock_image_ctx);
++
++ C_SaferCond ctx;
++ MockPreReleaseRequest *req = MockPreReleaseRequest::create(
++ mock_image_ctx, &mock_releasing_ctx, &ctx, false);
++ req->send();
++ ASSERT_EQ(0, ctx.wait());
++}
++
+ TEST_F(TestMockExclusiveLockPreReleaseRequest, BlockWritesError) {
+ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+ librbd::ImageCtx *ictx;
+ ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+ MockImageCtx mock_image_ctx(*ictx);
+
+ expect_op_work_queue(mock_image_ctx);
+
+ InSequence seq;
+ expect_cancel_op_requests(mock_image_ctx, 0);
+ expect_block_writes(mock_image_ctx, -EINVAL);
+ expect_unblock_writes(mock_image_ctx);
+
+ C_SaferCond ctx;
- MockPreReleaseRequest *req = MockPreReleaseRequest::create(mock_image_ctx,
- nullptr, &ctx,
- true);
++ MockPreReleaseRequest *req = MockPreReleaseRequest::create(
++ mock_image_ctx, nullptr, &ctx, true);
+ req->send();
+ ASSERT_EQ(-EINVAL, ctx.wait());
+ }
+
+ TEST_F(TestMockExclusiveLockPreReleaseRequest, UnlockError) {
+ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
+
+ librbd::ImageCtx *ictx;
+ ASSERT_EQ(0, open_image(m_image_name, &ictx));
+
+ MockImageCtx mock_image_ctx(*ictx);
+
+ expect_op_work_queue(mock_image_ctx);
+
+ InSequence seq;
+ expect_cancel_op_requests(mock_image_ctx, 0);
+ expect_block_writes(mock_image_ctx, 0);
++ expect_invalidate_cache(mock_image_ctx, false, 0);
+ expect_flush_notifies(mock_image_ctx);
+
+ C_SaferCond ctx;
++ MockPreReleaseRequest *req = MockPreReleaseRequest::create(
++ mock_image_ctx, nullptr, &ctx, true);
+ req->send();
+ ASSERT_EQ(0, ctx.wait());
+ }
+
+ } // namespace exclusive_lock
+ } // namespace librbd
--- /dev/null
--- /dev/null
++// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
++// vim: ts=8 sw=2 smarttab
++
++#include "test/librbd/test_mock_fixture.h"
++#include "test/librbd/test_support.h"
++#include "test/librbd/mock/MockImageCtx.h"
++#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
++#include "test/librados_test_stub/MockTestMemRadosClient.h"
++#include "cls/lock/cls_lock_ops.h"
++#include "librbd/ManagedLock.h"
++#include "librbd/managed_lock/BreakRequest.h"
++#include "gmock/gmock.h"
++#include "gtest/gtest.h"
++#include <arpa/inet.h>
++#include <list>
++
++namespace librbd {
++namespace {
++
++struct MockTestImageCtx : public librbd::MockImageCtx {
++ MockTestImageCtx(librbd::ImageCtx &image_ctx)
++ : librbd::MockImageCtx(image_ctx) {
++ }
++};
++
++} // anonymous namespace
++} // namespace librbd
++
++// template definitions
++#include "librbd/managed_lock/BreakRequest.cc"
++
++namespace librbd {
++namespace managed_lock {
++
++using ::testing::_;
++using ::testing::DoAll;
++using ::testing::InSequence;
++using ::testing::Return;
++using ::testing::SetArgPointee;
++using ::testing::StrEq;
++using ::testing::WithArg;
++
++class TestMockManagedLockBreakRequest : public TestMockFixture {
++public:
++ typedef BreakRequest<MockTestImageCtx> MockBreakRequest;
++
++ void expect_list_watchers(MockTestImageCtx &mock_image_ctx, int r,
++ const std::string &address, uint64_t watch_handle) {
++ auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
++ list_watchers(mock_image_ctx.header_oid, _));
++ if (r < 0) {
++ expect.WillOnce(Return(r));
++ } else {
++ obj_watch_t watcher;
++ strcpy(watcher.addr, (address + ":0/0").c_str());
++ watcher.cookie = watch_handle;
++
++ std::list<obj_watch_t> watchers;
++ watchers.push_back(watcher);
++
++ expect.WillOnce(DoAll(SetArgPointee<1>(watchers), Return(0)));
++ }
++ }
++
++ void expect_blacklist_add(MockTestImageCtx &mock_image_ctx, int r) {
++ EXPECT_CALL(get_mock_rados_client(), blacklist_add(_, _))
++ .WillOnce(Return(r));
++ }
++
++ void expect_break_lock(MockTestImageCtx &mock_image_ctx, int r) {
++ EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
++ exec(mock_image_ctx.header_oid, _, StrEq("lock"), StrEq("break_lock"), _, _, _))
++ .WillOnce(Return(r));
++ }
++};
++
++TEST_F(TestMockManagedLockBreakRequest, DeadLockOwner) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
++ expect_blacklist_add(mock_image_ctx, 0);
++ expect_break_lock(mock_image_ctx, 0);
++
++ C_SaferCond ctx;
++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
++ true, false, &ctx);
++ req->send();
++ ASSERT_EQ(0, ctx.wait());
++}
++
++TEST_F(TestMockManagedLockBreakRequest, ForceBreak) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_list_watchers(mock_image_ctx, 0, "1.2.3.4", 123);
++ expect_blacklist_add(mock_image_ctx, 0);
++ expect_break_lock(mock_image_ctx, 0);
++
++ C_SaferCond ctx;
++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
++ true, true, &ctx);
++ req->send();
++ ASSERT_EQ(0, ctx.wait());
++}
++
++TEST_F(TestMockManagedLockBreakRequest, GetWatchersError) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_list_watchers(mock_image_ctx, -EINVAL, "dead client", 123);
++
++ C_SaferCond ctx;
++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
++ true, false, &ctx);
++ req->send();
++ ASSERT_EQ(-EINVAL, ctx.wait());
++}
++
++TEST_F(TestMockManagedLockBreakRequest, GetWatchersAlive) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_list_watchers(mock_image_ctx, 0, "1.2.3.4", 123);
++
++ C_SaferCond ctx;
++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
++ true, false, &ctx);
++ req->send();
++ ASSERT_EQ(-EAGAIN, ctx.wait());
++}
++
++TEST_F(TestMockManagedLockBreakRequest, BlacklistDisabled) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
++ expect_break_lock(mock_image_ctx, 0);
++
++ C_SaferCond ctx;
++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
++ false, false, &ctx);
++ req->send();
++ ASSERT_EQ(0, ctx.wait());
++}
++
++TEST_F(TestMockManagedLockBreakRequest, BlacklistError) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
++ expect_blacklist_add(mock_image_ctx, -EINVAL);
++
++ C_SaferCond ctx;
++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
++ true, false, &ctx);
++ req->send();
++ ASSERT_EQ(-EINVAL, ctx.wait());
++}
++
++TEST_F(TestMockManagedLockBreakRequest, BreakLockMissing) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
++ expect_blacklist_add(mock_image_ctx, 0);
++ expect_break_lock(mock_image_ctx, -ENOENT);
++
++ C_SaferCond ctx;
++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
++ true, false, &ctx);
++ req->send();
++ ASSERT_EQ(0, ctx.wait());
++}
++
++TEST_F(TestMockManagedLockBreakRequest, BreakLockError) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_list_watchers(mock_image_ctx, 0, "dead client", 123);
++ expect_blacklist_add(mock_image_ctx, 0);
++ expect_break_lock(mock_image_ctx, -EINVAL);
++
++ C_SaferCond ctx;
++ Locker locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
++ MockBreakRequest *req = MockBreakRequest::create(mock_image_ctx, locker,
++ true, false, &ctx);
++ req->send();
++ ASSERT_EQ(-EINVAL, ctx.wait());
++}
++
++} // namespace managed_lock
++} // namespace librbd
++
--- /dev/null
--- /dev/null
++// -*- mode:C; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
++// vim: ts=8 sw=2 smarttab
++
++#include "test/librbd/test_mock_fixture.h"
++#include "test/librbd/test_support.h"
++#include "test/librbd/mock/MockImageCtx.h"
++#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
++#include "test/librados_test_stub/MockTestMemRadosClient.h"
++#include "cls/lock/cls_lock_ops.h"
++#include "librbd/ManagedLock.h"
++#include "librbd/managed_lock/GetLockerRequest.h"
++#include "librbd/managed_lock/Types.h"
++#include "gmock/gmock.h"
++#include "gtest/gtest.h"
++#include <arpa/inet.h>
++#include <list>
++
++namespace librbd {
++namespace {
++
++struct MockTestImageCtx : public librbd::MockImageCtx {
++ MockTestImageCtx(librbd::ImageCtx &image_ctx)
++ : librbd::MockImageCtx(image_ctx) {
++ }
++};
++
++} // anonymous namespace
++} // namespace librbd
++
++// template definitions
++#include "librbd/managed_lock/GetLockerRequest.cc"
++
++namespace librbd {
++namespace managed_lock {
++
++using ::testing::_;
++using ::testing::DoAll;
++using ::testing::InSequence;
++using ::testing::Return;
++using ::testing::StrEq;
++using ::testing::WithArg;
++
++class TestMockManagedLockGetLockerRequest : public TestMockFixture {
++public:
++ typedef GetLockerRequest<MockTestImageCtx> MockGetLockerRequest;
++
++ void expect_get_lock_info(MockTestImageCtx &mock_image_ctx, int r,
++ const entity_name_t &locker_entity,
++ const std::string &locker_address,
++ const std::string &locker_cookie,
++ const std::string &lock_tag,
++ ClsLockType lock_type) {
++ auto &expect = EXPECT_CALL(get_mock_io_ctx(mock_image_ctx.md_ctx),
++ exec(mock_image_ctx.header_oid, _, StrEq("lock"),
++ StrEq("get_info"), _, _, _));
++ if (r < 0 && r != -ENOENT) {
++ expect.WillOnce(Return(r));
++ } else {
++ entity_name_t entity(locker_entity);
++ entity_addr_t entity_addr;
++ entity_addr.parse(locker_address.c_str(), NULL);
++
++ cls_lock_get_info_reply reply;
++ if (r != -ENOENT) {
++ reply.lockers = decltype(reply.lockers){
++ {rados::cls::lock::locker_id_t(entity, locker_cookie),
++ rados::cls::lock::locker_info_t(utime_t(), entity_addr, "")}};
++ reply.tag = lock_tag;
++ reply.lock_type = lock_type;
++ }
++
++ bufferlist bl;
++ ::encode(reply, bl, CEPH_FEATURES_SUPPORTED_DEFAULT);
++
++ std::string str(bl.c_str(), bl.length());
++ expect.WillOnce(DoAll(WithArg<5>(CopyInBufferlist(str)), Return(0)));
++ }
++ }
++};
++
++TEST_F(TestMockManagedLockGetLockerRequest, Success) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
++ "auto 123", ManagedLock<>::WATCHER_LOCK_TAG,
++ LOCK_EXCLUSIVE);
++
++ C_SaferCond ctx;
++ Locker locker;
++ MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx,
++ &locker, &ctx);
++ req->send();
++ ASSERT_EQ(0, ctx.wait());
++
++ ASSERT_EQ(entity_name_t::CLIENT(1), locker.entity);
++ ASSERT_EQ("1.2.3.4:0/0", locker.address);
++ ASSERT_EQ("auto 123", locker.cookie);
++ ASSERT_EQ(123U, locker.handle);
++}
++
++TEST_F(TestMockManagedLockGetLockerRequest, GetLockInfoError) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_get_lock_info(mock_image_ctx, -EINVAL, entity_name_t::CLIENT(1), "",
++ "", "", LOCK_EXCLUSIVE);
++
++ C_SaferCond ctx;
++ Locker locker;
++ MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx,
++ &locker, &ctx);
++ req->send();
++ ASSERT_EQ(-EINVAL, ctx.wait());
++}
++
++TEST_F(TestMockManagedLockGetLockerRequest, GetLockInfoEmpty) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_get_lock_info(mock_image_ctx, -ENOENT, entity_name_t::CLIENT(1), "",
++ "", "", LOCK_EXCLUSIVE);
++
++ C_SaferCond ctx;
++ Locker locker;
++ MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx,
++ &locker, &ctx);
++ req->send();
++ ASSERT_EQ(-ENOENT, ctx.wait());
++}
++
++TEST_F(TestMockManagedLockGetLockerRequest, GetLockInfoExternalTag) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
++ "auto 123", "external tag", LOCK_EXCLUSIVE);
++
++ C_SaferCond ctx;
++ Locker locker;
++ MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx,
++ &locker, &ctx);
++ req->send();
++ ASSERT_EQ(-EBUSY, ctx.wait());
++}
++
++TEST_F(TestMockManagedLockGetLockerRequest, GetLockInfoShared) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
++ "auto 123", ManagedLock<>::WATCHER_LOCK_TAG,
++ LOCK_SHARED);
++
++ C_SaferCond ctx;
++ Locker locker;
++ MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx,
++ &locker, &ctx);
++ req->send();
++ ASSERT_EQ(-EBUSY, ctx.wait());
++}
++
++TEST_F(TestMockManagedLockGetLockerRequest, GetLockInfoExternalCookie) {
++ REQUIRE_FEATURE(RBD_FEATURE_EXCLUSIVE_LOCK);
++
++ librbd::ImageCtx *ictx;
++ ASSERT_EQ(0, open_image(m_image_name, &ictx));
++
++ MockTestImageCtx mock_image_ctx(*ictx);
++ expect_op_work_queue(mock_image_ctx);
++
++ InSequence seq;
++ expect_get_lock_info(mock_image_ctx, 0, entity_name_t::CLIENT(1), "1.2.3.4",
++ "external cookie", ManagedLock<>::WATCHER_LOCK_TAG,
++ LOCK_EXCLUSIVE);
++
++ C_SaferCond ctx;
++ Locker locker;
++ MockGetLockerRequest *req = MockGetLockerRequest::create(mock_image_ctx,
++ &locker, &ctx);
++ req->send();
++ ASSERT_EQ(-EBUSY, ctx.wait());
++}
++
++} // namespace managed_lock
++} // namespace librbd