#include "librbd/AioImageRequest.h"
#include "librbd/AioCompletion.h"
#include "librbd/AioObjectRequest.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/internal.h"
!m_image_ctx.journal->is_journal_replaying());
}
- assert(!m_image_ctx.image_watcher->is_lock_supported() ||
- m_image_ctx.image_watcher->is_lock_owner());
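+  // the exclusive lock is either unused or owned by this client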
+ assert(m_image_ctx.exclusive_lock == nullptr ||
+ m_image_ctx.exclusive_lock->is_lock_owner());
m_aio_comp->set_request_count(
m_image_ctx.cct, object_extents.size() +
#include "librbd/AioImageRequestWQ.h"
#include "librbd/AioCompletion.h"
#include "librbd/AioImageRequest.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
time_t ti, ThreadPool *tp)
: ThreadPool::PointerWQ<AioImageRequest>(name, ti, 0, tp),
m_image_ctx(*image_ctx), m_lock("AioImageRequestWQ::m_lock"),
- m_write_blockers(0), m_in_progress_writes(0), m_queued_writes(0),
- m_lock_listener(this), m_blocking_writes(false) {
-
+ m_write_blockers(0), m_in_progress_writes(0), m_queued_writes(0) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << ": ictx=" << image_ctx << dendl;
}
}
}
-void AioImageRequestWQ::register_lock_listener() {
- m_image_ctx.image_watcher->register_listener(&m_lock_listener);
-}
-
void *AioImageRequestWQ::_void_dequeue() {
AioImageRequest *peek_item = front();
if (peek_item == NULL) {
bool AioImageRequestWQ::is_lock_required() const {
assert(m_image_ctx.owner_lock.is_locked());
- if (m_image_ctx.image_watcher == NULL) {
+ if (m_image_ctx.exclusive_lock == NULL) {
return false;
}
- return (m_image_ctx.image_watcher->is_lock_supported() &&
- !m_image_ctx.image_watcher->is_lock_owner());
+ return (!m_image_ctx.exclusive_lock->is_lock_owner());
}
void AioImageRequestWQ::queue(AioImageRequest *req) {
assert(m_image_ctx.owner_lock.is_locked());
- bool first_write_op = false;
{
Mutex::Locker locker(m_lock);
if (req->is_write_op()) {
- if (++m_queued_writes == 1) {
- first_write_op = true;
- }
+ ++m_queued_writes;
}
}
ThreadPool::PointerWQ<AioImageRequest>::queue(req);
- if (is_lock_required() && first_write_op) {
- m_image_ctx.image_watcher->request_lock();
- }
-}
-
-void AioImageRequestWQ::handle_lock_updated(
- ImageWatcher::LockUpdateState state) {
- assert(m_image_ctx.owner_lock.is_locked());
-
- CephContext *cct = m_image_ctx.cct;
- ldout(cct, 20) << __func__ << ": ictx=" << &m_image_ctx << ", "
- << "state=" << state << dendl;
-
- if ((state == ImageWatcher::LOCK_UPDATE_STATE_NOT_SUPPORTED ||
- state == ImageWatcher::LOCK_UPDATE_STATE_LOCKED) && m_blocking_writes) {
- m_blocking_writes = false;
- unblock_writes();
- } else if (state == ImageWatcher::LOCK_UPDATE_STATE_RELEASING &&
- !m_blocking_writes) {
- m_blocking_writes = true;
- block_writes();
- } else if (state == ImageWatcher::LOCK_UPDATE_STATE_UNLOCKED) {
- assert(m_blocking_writes);
- assert(writes_blocked());
- } else if (state == ImageWatcher::LOCK_UPDATE_STATE_NOTIFICATION &&
- !writes_empty()) {
- m_image_ctx.image_watcher->request_lock();
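+  // request the exclusive lock whenever work is queued and we do not own it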
+ if (is_lock_required()) {
+ m_image_ctx.exclusive_lock->request_lock(nullptr);
}
}
#include "include/Context.h"
#include "common/WorkQueue.h"
#include "common/Mutex.h"
-#include "librbd/ImageWatcher.h"
namespace librbd {
void block_writes(Context *on_blocked);
void unblock_writes();
- void register_lock_listener();
-
protected:
virtual void *_void_dequeue();
virtual void process(AioImageRequest *req);
private:
typedef std::list<Context *> Contexts;
- struct LockListener : public ImageWatcher::Listener {
- AioImageRequestWQ *aio_work_queue;
- LockListener(AioImageRequestWQ *_aio_work_queue)
- : aio_work_queue(_aio_work_queue) {
- }
-
- virtual bool handle_requested_lock() {
- return true;
- }
- virtual void handle_lock_updated(ImageWatcher::LockUpdateState state) {
- aio_work_queue->handle_lock_updated(state);
- }
- };
-
struct C_BlockedWrites : public Context {
AioImageRequestWQ *aio_work_queue;
C_BlockedWrites(AioImageRequestWQ *_aio_work_queue)
uint32_t m_in_progress_writes;
uint32_t m_queued_writes;
- LockListener m_lock_listener;
- bool m_blocking_writes;
-
bool is_journal_required() const;
bool is_lock_required() const;
void queue(AioImageRequest *req);
- void handle_lock_updated(ImageWatcher::LockUpdateState state);
void handle_blocked_writes(int r);
};
#include "librbd/AioCompletion.h"
#include "librbd/AioImageRequest.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/internal.h"
write = true;
} else {
// should have been flushed prior to releasing lock
- assert(m_ictx->image_watcher->is_lock_owner());
+ assert(m_ictx->exclusive_lock->is_lock_owner());
ldout(m_ictx->cct, 20) << "send_pre " << this << " " << m_oid << " "
<< m_object_off << "~" << m_object_len << dendl;
}
// should have been flushed prior to releasing lock
- assert(m_ictx->image_watcher->is_lock_owner());
+ assert(m_ictx->exclusive_lock->is_lock_owner());
ldout(m_ictx->cct, 20) << "send_post " << this << " " << m_oid << " "
<< m_object_off << "~" << m_object_len << dendl;
#include "librbd/AioObjectRequest.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/CopyupRequest.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/internal.h"
if (snap_id == CEPH_NOSNAP) {
RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
- assert(m_image_ctx.image_watcher->is_lock_owner());
+ assert(m_image_ctx.exclusive_lock->is_lock_owner());
bool sent = m_image_ctx.object_map.aio_update(m_object_no, OBJECT_EXISTS,
boost::optional<uint8_t>(),
this);
RWLock::RLocker snap_locker(m_ictx->snap_lock);
if (m_ictx->object_map.enabled()) {
bool copy_on_read = m_pending_requests.empty();
- if (!m_ictx->image_watcher->is_lock_owner()) {
+ if (!m_ictx->exclusive_lock->is_lock_owner()) {
ldout(m_ictx->cct, 20) << "exclusive lock not held for copyup request"
<< dendl;
assert(copy_on_read);
// vim: ts=8 sw=2 smarttab
#include "librbd/ExclusiveLock.h"
+#include "cls/lock/cls_lock_client.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AioImageRequestWQ.h"
template <typename I>
bool ExclusiveLock<I>::is_lock_owner() const {
- ldout(m_image_ctx.cct, 20) << __func__ << dendl;
- assert(m_image_ctx.owner_lock.is_locked());
-
Mutex::Locker locker(m_lock);
- return (m_state == STATE_LOCKED);
+
+ bool lock_owner;
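+  // transition states that still hold the on-disk lock count as owners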
+ switch (m_state) {
+ case STATE_LOCKED:
+ case STATE_POST_ACQUIRING:
+ case STATE_PRE_RELEASING:
+ lock_owner = true;
+ break;
+ default:
+ lock_owner = false;
+ break;
+ }
+
+ ldout(m_image_ctx.cct, 20) << this << " " << __func__ << "=" << lock_owner
+ << dendl;
+ return lock_owner;
}
template <typename I>
void ExclusiveLock<I>::init(Context *on_init) {
- ldout(m_image_ctx.cct, 10) << __func__ << dendl;
-
- assert(m_image_ctx.owner_lock.is_wlocked());
+ assert(m_image_ctx.owner_lock.is_locked());
+ ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
- Mutex::Locker locker(m_lock);
- assert(m_state == STATE_UNINITIALIZED);
- m_state = STATE_INITIALIZING;
+ {
+ Mutex::Locker locker(m_lock);
+ assert(m_state == STATE_UNINITIALIZED);
+ m_state = STATE_INITIALIZING;
+ }
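+  // block image writes until the lock is acquired or the lock is shut down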
m_image_ctx.aio_work_queue->block_writes(new C_InitComplete(this, on_init));
}
template <typename I>
void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
- ldout(m_image_ctx.cct, 10) << __func__ << dendl;
- assert(m_image_ctx.owner_lock.is_wlocked());
+ ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
Mutex::Locker locker(m_lock);
assert(!is_shutdown());
assert(!is_shutdown());
if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
- ldout(m_image_ctx.cct, 10) << __func__ << dendl;
+ ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
execute_action(ACTION_TRY_LOCK, on_tried_lock);
return;
}
void ExclusiveLock<I>::request_lock(Context *on_locked) {
{
Mutex::Locker locker(m_lock);
+ assert(m_image_ctx.owner_lock.is_locked());
assert(!is_shutdown());
- assert(m_image_ctx.owner_lock.is_wlocked());
-
if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
- ldout(m_image_ctx.cct, 10) << __func__ << dendl;
+ ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
execute_action(ACTION_REQUEST_LOCK, on_locked);
return;
}
void ExclusiveLock<I>::release_lock(Context *on_released) {
{
Mutex::Locker locker(m_lock);
- assert(m_image_ctx.owner_lock.is_wlocked());
+ assert(m_image_ctx.owner_lock.is_locked());
assert(!is_shutdown());
if (m_state != STATE_UNLOCKED || !m_actions_contexts.empty()) {
- ldout(m_image_ctx.cct, 10) << __func__ << dendl;
+ ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
execute_action(ACTION_RELEASE_LOCK, on_released);
return;
}
return;
}
- ldout(m_image_ctx.cct, 10) << __func__ << dendl;
+ ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
assert(get_active_action() == ACTION_REQUEST_LOCK);
execute_next_action();
}
template <typename I>
-void ExclusiveLock<I>::set_watch_handle(uint64_t watch_handle) {
+void ExclusiveLock<I>::assert_header_locked(librados::ObjectWriteOperation *op) {
Mutex::Locker locker(m_lock);
- assert(m_watch_handle == 0 || watch_handle == 0);
- m_watch_handle = watch_handle;
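+  // add an assertion to the op that fails if this client no longer holds
+  // the exclusive lock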
+ rados::cls::lock::assert_locked(op, RBD_LOCK_NAME, LOCK_EXCLUSIVE,
+ encode_lock_cookie(), WATCHER_LOCK_TAG);
}
template <typename I>
case STATE_INITIALIZING:
case STATE_ACQUIRING:
case STATE_WAITING_FOR_PEER:
+ case STATE_POST_ACQUIRING:
+ case STATE_PRE_RELEASING:
case STATE_RELEASING:
case STATE_SHUTTING_DOWN:
return true;
}
m_lock.Lock();
- if (!m_actions_contexts.empty()) {
+ if (!is_transition_state() && !m_actions_contexts.empty()) {
execute_next_action();
}
}
template <typename I>
void ExclusiveLock<I>::handle_init_complete() {
- ldout(m_image_ctx.cct, 10) << __func__ << dendl;
+ ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
Mutex::Locker locker(m_lock);
m_state = STATE_UNLOCKED;
return;
}
- ldout(m_image_ctx.cct, 10) << __func__ << dendl;
+ ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
m_state = STATE_ACQUIRING;
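+  // capture the watch handle; it is embedded in the lock cookie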
+ m_watch_handle = m_image_ctx.image_watcher->get_watch_handle();
+
using el = ExclusiveLock<I>;
AcquireRequest<I>* req = AcquireRequest<I>::create(
m_image_ctx, encode_lock_cookie(),
util::create_context_callback<el, &el::handle_acquire_lock>(this));
+
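+  // drop m_lock while dispatching; the completion callback re-acquires it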
+ m_lock.Unlock();
req->send();
+ m_lock.Lock();
}
template <typename I>
void ExclusiveLock<I>::handle_acquire_lock(int r) {
CephContext *cct = m_image_ctx.cct;
- ldout(cct, 10) << __func__ << ": r=" << r << dendl;
+ ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
if (r == -EBUSY) {
ldout(cct, 5) << "unable to acquire exclusive lock" << dendl;
r = 0;
}
- Mutex::Locker locker(m_lock);
if (next_state == STATE_LOCKED) {
+ {
+ Mutex::Locker locker(m_lock);
+ m_state = STATE_POST_ACQUIRING;
+ }
+ m_image_ctx.image_watcher->notify_acquired_lock();
m_image_ctx.aio_work_queue->unblock_writes();
}
+
+ Mutex::Locker locker(m_lock);
complete_active_action(next_state, r);
}
return;
}
- ldout(m_image_ctx.cct, 10) << __func__ << dendl;
- m_state = STATE_RELEASING;
+ ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
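+  // block incoming writes before releasing the on-disk lock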
+ m_state = STATE_PRE_RELEASING;
m_image_ctx.op_work_queue->queue(
new C_BlockWrites(m_image_ctx, new C_ReleaseBlockWrites(this)), 0);
return;
}
- ldout(m_image_ctx.cct, 10) << __func__ << ": r=" << r << dendl;
+ ldout(m_image_ctx.cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
Mutex::Locker locker(m_lock);
- assert(m_state == STATE_RELEASING);
+ assert(m_state == STATE_PRE_RELEASING);
+ m_state = STATE_RELEASING;
using el = ExclusiveLock<I>;
ReleaseRequest<I>* req = ReleaseRequest<I>::create(
m_image_ctx, encode_lock_cookie(),
util::create_context_callback<el, &el::handle_release_lock>(this));
+
+ m_lock.Unlock();
req->send();
+ m_lock.Lock();
}
template <typename I>
void ExclusiveLock<I>::handle_release_lock(int r) {
- Mutex::Locker locker(m_lock);
+ bool pending_writes = false;
+ {
+ Mutex::Locker locker(m_lock);
+ ldout(m_image_ctx.cct, 10) << this << " " << __func__ << ": r=" << r
+ << dendl;
- ldout(m_image_ctx.cct, 10) << __func__ << ": r=" << r << dendl;
+ assert(m_state == STATE_RELEASING);
+ if (r < 0) {
+ m_image_ctx.aio_work_queue->unblock_writes();
+ } else {
+ m_lock.Unlock();
+ m_image_ctx.image_watcher->notify_released_lock();
+ pending_writes = !m_image_ctx.aio_work_queue->writes_empty();
+ m_lock.Lock();
- if (r < 0) {
- m_image_ctx.aio_work_queue->unblock_writes();
+ m_watch_handle = 0;
+ }
+ complete_active_action(r < 0 ? STATE_LOCKED : STATE_UNLOCKED, r);
+ }
+
+ if (r >= 0 && pending_writes) {
+ // if we have pending writes -- re-request the lock
+ RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ request_lock(nullptr);
}
- complete_active_action(r < 0 ? STATE_LOCKED : STATE_UNLOCKED, r);
}
template <typename I>
assert(m_lock.is_locked());
if (m_state == STATE_UNLOCKED) {
m_image_ctx.aio_work_queue->unblock_writes();
- complete_active_action(STATE_SHUTDOWN, 0);
+ m_image_ctx.op_work_queue->queue(util::create_context_callback<
+ ExclusiveLock<I>, &ExclusiveLock<I>::complete_shutdown>(this), 0);
return;
}
- ldout(m_image_ctx.cct, 10) << __func__ << dendl;
+ ldout(m_image_ctx.cct, 10) << this << " " << __func__ << dendl;
assert(m_state == STATE_LOCKED);
m_state = STATE_SHUTTING_DOWN;
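+  // defer the on-disk lock release to the op work queue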
+ m_lock.Unlock();
+ m_image_ctx.op_work_queue->queue(new C_ShutDownRelease(this), 0);
+ m_lock.Lock();
+}
+
+template <typename I>
+void ExclusiveLock<I>::send_shutdown_release() {
+ std::string cookie;
+ {
+ Mutex::Locker locker(m_lock);
+ cookie = encode_lock_cookie();
+ }
+
using el = ExclusiveLock<I>;
ReleaseRequest<I>* req = ReleaseRequest<I>::create(
- m_image_ctx, encode_lock_cookie(),
+ m_image_ctx, cookie,
util::create_context_callback<el, &el::handle_shutdown>(this));
req->send();
}
template <typename I>
void ExclusiveLock<I>::handle_shutdown(int r) {
- Mutex::Locker locker(m_lock);
- ldout(m_image_ctx.cct, 10) << __func__ << ": r=" << r << dendl;
+ CephContext *cct = m_image_ctx.cct;
+ ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
- complete_active_action(r == 0 ? STATE_SHUTDOWN : STATE_LOCKED, r);
+ if (r < 0) {
+ lderr(cct) << "failed to shut down exclusive lock: " << cpp_strerror(r)
+ << dendl;
+ }
+
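+  // notify peers that the lock was released before completing shut down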
+ m_image_ctx.image_watcher->notify_released_lock();
+ complete_shutdown(r);
+}
+
+template <typename I>
+void ExclusiveLock<I>::complete_shutdown(int r) {
+ ActionContexts action_contexts;
+ {
+ Mutex::Locker locker(m_lock);
+ assert(m_lock.is_locked());
+ assert(m_actions_contexts.size() == 1);
+
+ action_contexts = std::move(m_actions_contexts.front());
+ m_actions_contexts.pop_front();
+ m_state = STATE_SHUTDOWN;
+ }
+
+ // expect to be destroyed after firing callback
+ for (auto ctx : action_contexts.second) {
+ ctx->complete(r);
+ }
}
} // namespace librbd
#include "include/int_types.h"
#include "include/Context.h"
+#include "include/rados/librados.hpp"
#include "common/Mutex.h"
#include "common/RWLock.h"
#include <list>
void handle_lock_released();
- void set_watch_handle(uint64_t watch_handle);
+ void assert_header_locked(librados::ObjectWriteOperation *op);
+
static bool decode_lock_cookie(const std::string &cookie, uint64_t *handle);
private:
* v (init) (try_lock/request_lock) * |
* UNINITIALIZED -------> UNLOCKED ------------------------> ACQUIRING <--/
* ^ |
+ * | v
+ * RELEASING POST_ACQUIRING
+ * | |
* | |
* | (release_lock) v
- * RELEASING <------------------------- LOCKED
+ * PRE_RELEASING <------------------------ LOCKED
*
* <UNLOCKED/LOCKED states>
* |
STATE_LOCKED,
STATE_INITIALIZING,
STATE_ACQUIRING,
+ STATE_POST_ACQUIRING,
STATE_WAITING_FOR_PEER,
+ STATE_PRE_RELEASING,
STATE_RELEASING,
STATE_SHUTTING_DOWN,
STATE_SHUTDOWN,
}
};
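+  // queued on the op work queue to release the on-disk lock during shut down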
+ struct C_ShutDownRelease : public Context {
+ ExclusiveLock *exclusive_lock;
+ C_ShutDownRelease(ExclusiveLock *exclusive_lock)
+ : exclusive_lock(exclusive_lock) {
+ }
+ virtual void finish(int r) override {
+ exclusive_lock->send_shutdown_release();
+ }
+ };
+
ImageCtxT &m_image_ctx;
mutable Mutex m_lock;
void handle_release_lock(int r);
void send_shutdown();
+ void send_shutdown_release();
void handle_shutdown(int r);
+ void complete_shutdown(int r);
};
} // namespace librbd
int ImageCtx::register_watch() {
assert(image_watcher == NULL);
image_watcher = new ImageWatcher(*this);
- aio_work_queue->register_lock_listener();
return image_watcher->register_watch();
}
// vim: ts=8 sw=2 smarttab
#include "librbd/ImageWatcher.h"
#include "librbd/AioCompletion.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/ObjectMap.h"
#include "librbd/TaskFinisher.h"
#include "librbd/Utils.h"
-#include "cls/lock/cls_lock_client.h"
-#include "cls/lock/cls_lock_types.h"
#include "include/encoding.h"
#include "include/stringify.h"
#include "common/errno.h"
using namespace watch_notify;
-static const std::string WATCHER_LOCK_TAG = "internal";
-static const std::string WATCHER_LOCK_COOKIE_PREFIX = "auto";
-
static const uint64_t NOTIFY_TIMEOUT = 5000;
static const double RETRY_DELAY_SECONDS = 1.0;
m_watch_lock(util::unique_lock_name("librbd::ImageWatcher::m_watch_lock", this)),
m_watch_ctx(*this), m_watch_handle(0),
m_watch_state(WATCH_STATE_UNREGISTERED),
- m_refresh_lock(util::unique_lock_name("librbd::ImageWatcher::m_refresh_lock", this)),
- m_lock_supported(false), m_lock_owner_state(LOCK_OWNER_STATE_NOT_LOCKED),
- m_listeners_lock(util::unique_lock_name("librbd::ImageWatcher::m_listeners_lock", this)),
- m_listeners_in_use(false),
m_task_finisher(new TaskFinisher<Task>(*m_image_ctx.cct)),
m_async_request_lock(util::unique_lock_name("librbd::ImageWatcher::m_async_request_lock", this)),
m_owner_client_id_lock(util::unique_lock_name("librbd::ImageWatcher::m_owner_client_id_lock", this))
RWLock::RLocker l(m_watch_lock);
assert(m_watch_state != WATCH_STATE_REGISTERED);
}
- {
- RWLock::RLocker l(m_image_ctx.owner_lock);
- assert(m_lock_owner_state == LOCK_OWNER_STATE_NOT_LOCKED);
- }
-}
-
-bool ImageWatcher::is_lock_supported() const {
- RWLock::RLocker l(m_image_ctx.snap_lock);
- return is_lock_supported(m_image_ctx.snap_lock);
-}
-
-bool ImageWatcher::is_lock_supported(const RWLock &) const {
- assert(m_image_ctx.owner_lock.is_locked());
- assert(m_image_ctx.snap_lock.is_locked());
- return ((m_image_ctx.features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0 &&
- !m_image_ctx.read_only && m_image_ctx.snap_id == CEPH_NOSNAP);
-}
-
-bool ImageWatcher::is_lock_owner() const {
- assert(m_image_ctx.owner_lock.is_locked());
- return (m_lock_owner_state == LOCK_OWNER_STATE_LOCKED ||
- m_lock_owner_state == LOCK_OWNER_STATE_RELEASING);
-}
-
-void ImageWatcher::register_listener(Listener *listener) {
- Mutex::Locker listeners_locker(m_listeners_lock);
- m_listeners.push_back(listener);
-}
-
-void ImageWatcher::unregister_listener(Listener *listener) {
- // TODO CoW listener list
- Mutex::Locker listeners_locker(m_listeners_lock);
- while (m_listeners_in_use) {
- m_listeners_cond.Wait(m_listeners_lock);
- }
- m_listeners.remove(listener);
}
int ImageWatcher::register_watch() {
return r;
}
-int ImageWatcher::refresh() {
- assert(m_image_ctx.owner_lock.is_locked());
-
- bool lock_support_changed = false;
- {
- Mutex::Locker refresh_locker(m_refresh_lock);
- if (m_lock_supported != is_lock_supported()) {
- m_lock_supported = is_lock_supported();
- lock_support_changed = true;
- }
- }
-
- int r = 0;
- if (lock_support_changed) {
- if (is_lock_supported()) {
- // image opened, exclusive lock dynamically enabled, or now HEAD
- notify_listeners_updated_lock(LOCK_UPDATE_STATE_RELEASING);
- notify_listeners_updated_lock(LOCK_UPDATE_STATE_UNLOCKED);
- } else if (!is_lock_supported()) {
- if (is_lock_owner()) {
- // exclusive lock dynamically disabled or now snapshot
- m_image_ctx.owner_lock.put_read();
- {
- RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
- r = release_lock();
- }
- m_image_ctx.owner_lock.get_read();
- }
- notify_listeners_updated_lock(LOCK_UPDATE_STATE_NOT_SUPPORTED);
- }
- }
- return r;
-}
-
-int ImageWatcher::try_lock() {
- assert(m_image_ctx.owner_lock.is_wlocked());
- assert(m_lock_owner_state == LOCK_OWNER_STATE_NOT_LOCKED);
- assert(is_lock_supported());
-
- while (true) {
- int r = lock();
- if (r != -EBUSY) {
- return r;
- }
-
- // determine if the current lock holder is still alive
- entity_name_t locker;
- std::string locker_cookie;
- std::string locker_address;
- uint64_t locker_handle;
- r = get_lock_owner_info(&locker, &locker_cookie, &locker_address,
- &locker_handle);
- if (r < 0) {
- return r;
- }
- if (locker_cookie.empty() || locker_address.empty()) {
- // lock is now unlocked ... try again
- continue;
- }
-
- std::list<obj_watch_t> watchers;
- r = m_image_ctx.md_ctx.list_watchers(m_image_ctx.header_oid, &watchers);
- if (r < 0) {
- return r;
- }
-
- for (std::list<obj_watch_t>::iterator iter = watchers.begin();
- iter != watchers.end(); ++iter) {
- if ((strncmp(locker_address.c_str(),
- iter->addr, sizeof(iter->addr)) == 0) &&
- (locker_handle == iter->cookie)) {
- Mutex::Locker l(m_owner_client_id_lock);
- set_owner_client_id(ClientId(iter->watcher_id, locker_handle));
- return 0;
- }
- }
-
- if (m_image_ctx.blacklist_on_break_lock) {
- ldout(m_image_ctx.cct, 1) << this << " blacklisting client: " << locker
- << "@" << locker_address << dendl;
- librados::Rados rados(m_image_ctx.md_ctx);
- r = rados.blacklist_add(locker_address,
- m_image_ctx.blacklist_expire_seconds);
- if (r < 0) {
- lderr(m_image_ctx.cct) << this << " unable to blacklist client: "
- << cpp_strerror(r) << dendl;
- return r;
- }
- }
-
- ldout(m_image_ctx.cct, 5) << this << " breaking exclusive lock: " << locker
- << dendl;
- r = rados::cls::lock::break_lock(&m_image_ctx.md_ctx,
- m_image_ctx.header_oid, RBD_LOCK_NAME,
- locker_cookie, locker);
- if (r < 0 && r != -ENOENT) {
- return r;
- }
- }
- return 0;
-}
-
-void ImageWatcher::request_lock() {
- schedule_request_lock(false);
-}
-
-bool ImageWatcher::try_request_lock() {
- assert(m_image_ctx.owner_lock.is_locked());
- if (is_lock_owner()) {
- return true;
- }
-
- int r = 0;
- m_image_ctx.owner_lock.put_read();
- {
- RWLock::WLocker l(m_image_ctx.owner_lock);
- if (!is_lock_owner()) {
- r = try_lock();
- }
- }
- m_image_ctx.owner_lock.get_read();
-
- if (r < 0) {
- ldout(m_image_ctx.cct, 5) << this << " failed to acquire exclusive lock:"
- << cpp_strerror(r) << dendl;
- return false;
- }
-
- if (is_lock_owner()) {
- ldout(m_image_ctx.cct, 15) << this << " successfully acquired exclusive lock"
- << dendl;
- } else {
- ldout(m_image_ctx.cct, 15) << this
- << " unable to acquire exclusive lock, retrying"
- << dendl;
- }
- return is_lock_owner();
-}
-
-int ImageWatcher::get_lock_owner_info(entity_name_t *locker, std::string *cookie,
- std::string *address, uint64_t *handle) {
- std::map<rados::cls::lock::locker_id_t,
- rados::cls::lock::locker_info_t> lockers;
- ClsLockType lock_type;
- std::string lock_tag;
- int r = rados::cls::lock::get_lock_info(&m_image_ctx.md_ctx,
- m_image_ctx.header_oid,
- RBD_LOCK_NAME, &lockers, &lock_type,
- &lock_tag);
- if (r < 0) {
- return r;
- }
-
- if (lockers.empty()) {
- ldout(m_image_ctx.cct, 20) << this << " no lockers detected" << dendl;
- return 0;
- }
-
- if (lock_tag != WATCHER_LOCK_TAG) {
- ldout(m_image_ctx.cct, 5) << this << " locked by external mechanism: tag="
- << lock_tag << dendl;
- return -EBUSY;
- }
-
- if (lock_type == LOCK_SHARED) {
- ldout(m_image_ctx.cct, 5) << this << " shared lock type detected" << dendl;
- return -EBUSY;
- }
-
- std::map<rados::cls::lock::locker_id_t,
- rados::cls::lock::locker_info_t>::iterator iter = lockers.begin();
- if (!decode_lock_cookie(iter->first.cookie, handle)) {
- ldout(m_image_ctx.cct, 5) << this << " locked by external mechanism: "
- << "cookie=" << iter->first.cookie << dendl;
- return -EBUSY;
- }
-
- *locker = iter->first.locker;
- *cookie = iter->first.cookie;
- *address = stringify(iter->second.addr);
- ldout(m_image_ctx.cct, 10) << this << " retrieved exclusive locker: "
- << *locker << "@" << *address << dendl;
- return 0;
-}
-
-int ImageWatcher::lock() {
- assert(m_image_ctx.owner_lock.is_wlocked());
- assert(m_lock_owner_state == LOCK_OWNER_STATE_NOT_LOCKED);
-
- int r = rados::cls::lock::lock(&m_image_ctx.md_ctx, m_image_ctx.header_oid,
- RBD_LOCK_NAME, LOCK_EXCLUSIVE,
- encode_lock_cookie(), WATCHER_LOCK_TAG, "",
- utime_t(), 0);
- if (r < 0) {
- return r;
- }
-
- ldout(m_image_ctx.cct, 10) << this << " acquired exclusive lock" << dendl;
- m_lock_owner_state = LOCK_OWNER_STATE_LOCKED;
-
- ClientId owner_client_id = get_client_id();
- {
- Mutex::Locker l(m_owner_client_id_lock);
- set_owner_client_id(owner_client_id);
- }
-
- if (m_image_ctx.object_map.enabled()) {
- r = m_image_ctx.object_map.lock();
- if (r < 0 && r != -ENOENT) {
- unlock();
- return r;
- }
- RWLock::WLocker l2(m_image_ctx.snap_lock);
- m_image_ctx.object_map.refresh(CEPH_NOSNAP);
- }
-
- // send the notification when we aren't holding locks
- FunctionContext *ctx = new FunctionContext(
- boost::bind(&ImageWatcher::notify_acquired_lock, this));
- m_task_finisher->queue(TASK_CODE_ACQUIRED_LOCK, ctx);
- return 0;
-}
-
-int ImageWatcher::unlock()
-{
- assert(m_image_ctx.owner_lock.is_wlocked());
-
- ldout(m_image_ctx.cct, 10) << this << " releasing exclusive lock" << dendl;
- m_lock_owner_state = LOCK_OWNER_STATE_NOT_LOCKED;
- int r = rados::cls::lock::unlock(&m_image_ctx.md_ctx, m_image_ctx.header_oid,
- RBD_LOCK_NAME, encode_lock_cookie());
- if (r < 0 && r != -ENOENT) {
- lderr(m_image_ctx.cct) << this << " failed to release exclusive lock: "
- << cpp_strerror(r) << dendl;
- return r;
- }
-
- if (m_image_ctx.object_map.enabled()) {
- m_image_ctx.object_map.unlock();
- }
-
- {
- Mutex::Locker l(m_owner_client_id_lock);
- set_owner_client_id(ClientId());
- }
-
- FunctionContext *ctx = new FunctionContext(
- boost::bind(&ImageWatcher::notify_released_lock, this));
- m_task_finisher->queue(TASK_CODE_RELEASED_LOCK, ctx);
- return 0;
-}
-
-int ImageWatcher::release_lock()
-{
- assert(m_image_ctx.owner_lock.is_wlocked());
-
- CephContext *cct = m_image_ctx.cct;
- ldout(cct, 10) << this << " releasing exclusive lock by request" << dendl;
- if (m_lock_owner_state != LOCK_OWNER_STATE_LOCKED) {
- return 0;
- }
-
- m_lock_owner_state = LOCK_OWNER_STATE_RELEASING;
- m_image_ctx.owner_lock.put_write();
-
- // ensure all maint operations are canceled
- m_image_ctx.cancel_async_requests();
- m_image_ctx.flush_async_operations();
-
- int r;
- {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-
- // alert listeners that all incoming IO needs to be stopped since the
- // lock is being released
- notify_listeners_updated_lock(LOCK_UPDATE_STATE_RELEASING);
-
- RWLock::WLocker md_locker(m_image_ctx.md_lock);
- r = m_image_ctx.flush();
- if (r < 0) {
- lderr(cct) << this << " failed to flush: " << cpp_strerror(r) << dendl;
- goto err_cancel_unlock;
- }
- }
-
- m_image_ctx.owner_lock.get_write();
- assert(m_lock_owner_state == LOCK_OWNER_STATE_RELEASING);
- r = unlock();
-
- // notify listeners of the change w/ owner read locked
- m_image_ctx.owner_lock.put_write();
- {
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_NOT_LOCKED) {
- notify_listeners_updated_lock(LOCK_UPDATE_STATE_UNLOCKED);
- }
- }
- m_image_ctx.owner_lock.get_write();
-
- if (r < 0) {
- lderr(cct) << this << " failed to unlock: " << cpp_strerror(r) << dendl;
- return r;
- }
-
- return 0;
-
-err_cancel_unlock:
- m_image_ctx.owner_lock.get_write();
- if (m_lock_owner_state == LOCK_OWNER_STATE_RELEASING) {
- m_lock_owner_state = LOCK_OWNER_STATE_LOCKED;
- }
- return r;
-}
-
-void ImageWatcher::assert_header_locked(librados::ObjectWriteOperation *op) {
- rados::cls::lock::assert_locked(op, RBD_LOCK_NAME, LOCK_EXCLUSIVE,
- encode_lock_cookie(), WATCHER_LOCK_TAG);
-}
-
void ImageWatcher::schedule_async_progress(const AsyncRequestId &request,
uint64_t offset, uint64_t total) {
FunctionContext *ctx = new FunctionContext(
int ImageWatcher::notify_flatten(uint64_t request_id, ProgressContext &prog_ctx) {
assert(m_image_ctx.owner_lock.is_locked());
- assert(!is_lock_owner());
+ assert(m_image_ctx.exclusive_lock &&
+ !m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
int ImageWatcher::notify_resize(uint64_t request_id, uint64_t size,
ProgressContext &prog_ctx) {
assert(m_image_ctx.owner_lock.is_locked());
- assert(!is_lock_owner());
+ assert(m_image_ctx.exclusive_lock &&
+ !m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
int ImageWatcher::notify_snap_create(const std::string &snap_name) {
assert(m_image_ctx.owner_lock.is_locked());
- assert(!is_lock_owner());
+ assert(m_image_ctx.exclusive_lock &&
+ !m_image_ctx.exclusive_lock->is_lock_owner());
bufferlist bl;
::encode(NotifyMessage(SnapCreatePayload(snap_name)), bl);
int ImageWatcher::notify_snap_rename(const snapid_t &src_snap_id,
const std::string &dst_snap_name) {
assert(m_image_ctx.owner_lock.is_locked());
- assert(!is_lock_owner());
+ assert(m_image_ctx.exclusive_lock &&
+ !m_image_ctx.exclusive_lock->is_lock_owner());
bufferlist bl;
::encode(NotifyMessage(SnapRenamePayload(src_snap_id, dst_snap_name)), bl);
}
int ImageWatcher::notify_snap_remove(const std::string &snap_name) {
assert(m_image_ctx.owner_lock.is_locked());
- assert(!is_lock_owner());
+ assert(m_image_ctx.exclusive_lock &&
+ !m_image_ctx.exclusive_lock->is_lock_owner());
bufferlist bl;
::encode(NotifyMessage(SnapRemovePayload(snap_name)), bl);
int ImageWatcher::notify_snap_protect(const std::string &snap_name) {
assert(m_image_ctx.owner_lock.is_locked());
- assert(!is_lock_owner());
+ assert(m_image_ctx.exclusive_lock &&
+ !m_image_ctx.exclusive_lock->is_lock_owner());
bufferlist bl;
::encode(NotifyMessage(SnapProtectPayload(snap_name)), bl);
int ImageWatcher::notify_snap_unprotect(const std::string &snap_name) {
assert(m_image_ctx.owner_lock.is_locked());
- assert(!is_lock_owner());
+ assert(m_image_ctx.exclusive_lock &&
+ !m_image_ctx.exclusive_lock->is_lock_owner());
bufferlist bl;
::encode(NotifyMessage(SnapUnprotectPayload(snap_name)), bl);
int ImageWatcher::notify_rebuild_object_map(uint64_t request_id,
ProgressContext &prog_ctx) {
assert(m_image_ctx.owner_lock.is_locked());
- assert(!is_lock_owner());
+ assert(m_image_ctx.exclusive_lock &&
+ !m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
int ImageWatcher::notify_rename(const std::string &image_name) {
assert(m_image_ctx.owner_lock.is_locked());
- assert(!is_lock_owner());
+ assert(m_image_ctx.exclusive_lock &&
+ !m_image_ctx.exclusive_lock->is_lock_owner());
bufferlist bl;
::encode(NotifyMessage(RenamePayload(image_name)), bl);
return notify_lock_owner(bl);
}
-void ImageWatcher::notify_lock_state() {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_LOCKED) {
- // re-send the acquired lock notification so that peers know they can now
- // request the lock
- ldout(m_image_ctx.cct, 10) << this << " notify lock state" << dendl;
-
- bufferlist bl;
- ::encode(NotifyMessage(AcquiredLockPayload(get_client_id())), bl);
-
- m_image_ctx.md_ctx.notify2(m_image_ctx.header_oid, bl, NOTIFY_TIMEOUT,
- NULL);
- }
-}
-
void ImageWatcher::notify_header_update(librados::IoCtx &io_ctx,
const std::string &oid)
{
io_ctx.notify2(oid, bl, NOTIFY_TIMEOUT, NULL);
}
-std::string ImageWatcher::encode_lock_cookie() const {
- RWLock::RLocker l(m_watch_lock);
- std::ostringstream ss;
- ss << WATCHER_LOCK_COOKIE_PREFIX << " " << m_watch_handle;
- return ss.str();
-}
-
-bool ImageWatcher::decode_lock_cookie(const std::string &tag,
- uint64_t *handle) {
- std::string prefix;
- std::istringstream ss(tag);
- if (!(ss >> prefix >> *handle) || prefix != WATCHER_LOCK_COOKIE_PREFIX) {
- return false;
- }
- return true;
-}
-
void ImageWatcher::schedule_cancel_async_requests() {
FunctionContext *ctx = new FunctionContext(
boost::bind(&ImageWatcher::cancel_async_requests, this));
void ImageWatcher::notify_acquired_lock() {
ldout(m_image_ctx.cct, 10) << this << " notify acquired lock" << dendl;
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- if (m_lock_owner_state != LOCK_OWNER_STATE_LOCKED) {
- return;
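+  // record this client as the lock owner before broadcasting the
+  // acquired-lock notification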
+ ClientId client_id = get_client_id();
+ {
+ Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
+ set_owner_client_id(client_id);
}
- notify_listeners_updated_lock(LOCK_UPDATE_STATE_LOCKED);
-
bufferlist bl;
- ::encode(NotifyMessage(AcquiredLockPayload(get_client_id())), bl);
+ ::encode(NotifyMessage(AcquiredLockPayload(client_id)), bl);
m_image_ctx.md_ctx.notify2(m_image_ctx.header_oid, bl, NOTIFY_TIMEOUT, NULL);
}
-void ImageWatcher::notify_release_lock() {
- RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
- release_lock();
-}
-
void ImageWatcher::notify_released_lock() {
ldout(m_image_ctx.cct, 10) << this << " notify released lock" << dendl;
+
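+  // clear the cached owner before broadcasting the released-lock notification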
+ {
+ Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
+ set_owner_client_id(ClientId());
+ }
+
bufferlist bl;
::encode(NotifyMessage(ReleasedLockPayload(get_client_id())), bl);
m_image_ctx.md_ctx.notify2(m_image_ctx.header_oid, bl, NOTIFY_TIMEOUT, NULL);
void ImageWatcher::schedule_request_lock(bool use_timer, int timer_delay) {
assert(m_image_ctx.owner_lock.is_locked());
- assert(m_lock_owner_state == LOCK_OWNER_STATE_NOT_LOCKED);
+
+ if (m_image_ctx.exclusive_lock == nullptr) {
+ // exclusive lock dynamically disabled via image refresh
+ return;
+ }
+ assert(m_image_ctx.exclusive_lock &&
+ !m_image_ctx.exclusive_lock->is_lock_owner());
RWLock::RLocker watch_locker(m_watch_lock);
if (m_watch_state == WATCH_STATE_REGISTERED) {
}
void ImageWatcher::notify_request_lock() {
- ldout(m_image_ctx.cct, 10) << this << " notify request lock" << dendl;
-
RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- if (try_request_lock()) {
- return;
- }
+ ldout(m_image_ctx.cct, 10) << this << " notify request lock" << dendl;
bufferlist bl;
::encode(NotifyMessage(RequestLockPayload(get_client_id())), bl);
if (r == -ETIMEDOUT) {
ldout(m_image_ctx.cct, 5) << this << " timed out requesting lock: retrying"
<< dendl;
- schedule_request_lock(false);
+
+    // treat this as a dead client -- so retest acquiring the lock
+ m_image_ctx.exclusive_lock->handle_lock_released();
} else if (r < 0) {
lderr(m_image_ctx.cct) << this << " error requesting lock: "
<< cpp_strerror(r) << dendl;
int r = m_image_ctx.md_ctx.notify2(m_image_ctx.header_oid, bl, NOTIFY_TIMEOUT,
&response_bl);
m_image_ctx.owner_lock.get_read();
+
if (r < 0 && r != -ETIMEDOUT) {
lderr(m_image_ctx.cct) << this << " lock owner notification failed: "
<< cpp_strerror(r) << dendl;
bool cancel_async_requests = true;
if (payload.client_id.is_valid()) {
- Mutex::Locker l(m_owner_client_id_lock);
+ Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
if (payload.client_id == m_owner_client_id) {
cancel_async_requests = false;
}
}
RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_NOT_LOCKED) {
- if (cancel_async_requests) {
- schedule_cancel_async_requests();
- }
- notify_listeners_updated_lock(LOCK_UPDATE_STATE_NOTIFICATION);
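+  // cancel outstanding async requests unless the notification came from the
+  // known lock owner or this client holds the lock itself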
+ if (cancel_async_requests &&
+ (m_image_ctx.exclusive_lock == nullptr ||
+ !m_image_ctx.exclusive_lock->is_lock_owner())) {
+ schedule_cancel_async_requests();
}
return true;
}
}
RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_NOT_LOCKED) {
- if (cancel_async_requests) {
- schedule_cancel_async_requests();
- }
- notify_listeners_updated_lock(LOCK_UPDATE_STATE_NOTIFICATION);
+ if (cancel_async_requests &&
+ (m_image_ctx.exclusive_lock == nullptr ||
+ !m_image_ctx.exclusive_lock->is_lock_owner())) {
+ schedule_cancel_async_requests();
+ }
+
+ // alert the exclusive lock state machine that the lock is available
+ if (m_image_ctx.exclusive_lock != nullptr &&
+ !m_image_ctx.exclusive_lock->is_lock_owner()) {
+ m_task_finisher->cancel(TASK_CODE_REQUEST_LOCK);
+ m_image_ctx.exclusive_lock->handle_lock_released();
}
return true;
}
}
RWLock::RLocker l(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_LOCKED) {
+ if (m_image_ctx.exclusive_lock != nullptr &&
+ m_image_ctx.exclusive_lock->is_lock_owner()) {
// need to send something back so the client can detect a missing leader
::encode(ResponseMessage(0), ack_ctx->out);
{
- Mutex::Locker l(m_owner_client_id_lock);
+ Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
if (!m_owner_client_id.is_valid()) {
return true;
}
}
- bool release_permitted = true;
- {
- Mutex::Locker listeners_locker(m_listeners_lock);
- for (Listeners::iterator it = m_listeners.begin();
- it != m_listeners.end(); ++it) {
- if (!(*it)->handle_requested_lock()) {
- release_permitted = false;
- break;
- }
- }
- }
-
- if (release_permitted) {
- ldout(m_image_ctx.cct, 10) << this << " queuing release of exclusive lock"
- << dendl;
- FunctionContext *ctx = new FunctionContext(
- boost::bind(&ImageWatcher::notify_release_lock, this));
- m_task_finisher->queue(TASK_CODE_RELEASING_LOCK, ctx);
- }
+ ldout(m_image_ctx.cct, 10) << this << " queuing release of exclusive lock"
+ << dendl;
+ FunctionContext *ctx = new FunctionContext(
+ boost::bind(&ImageWatcher::notify_release_lock, this));
+ m_task_finisher->queue(TASK_CODE_RELEASING_LOCK, ctx);
}
return true;
}
C_NotifyAck *ack_ctx) {
RWLock::RLocker l(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_LOCKED) {
+ if (m_image_ctx.exclusive_lock != nullptr &&
+ m_image_ctx.exclusive_lock->is_lock_owner()) {
bool new_request;
Context *ctx;
ProgressContext *prog_ctx;
bool ImageWatcher::handle_payload(const ResizePayload &payload,
C_NotifyAck *ack_ctx) {
RWLock::RLocker l(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_LOCKED) {
+ if (m_image_ctx.exclusive_lock != nullptr &&
+ m_image_ctx.exclusive_lock->is_lock_owner()) {
bool new_request;
Context *ctx;
ProgressContext *prog_ctx;
bool ImageWatcher::handle_payload(const SnapCreatePayload &payload,
C_NotifyAck *ack_ctx) {
RWLock::RLocker l(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_LOCKED) {
+ if (m_image_ctx.exclusive_lock != nullptr &&
+ m_image_ctx.exclusive_lock->is_lock_owner()) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_create request: "
<< payload.snap_name << dendl;
bool ImageWatcher::handle_payload(const SnapRenamePayload &payload,
C_NotifyAck *ack_ctx) {
RWLock::RLocker l(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_LOCKED) {
+ if (m_image_ctx.exclusive_lock != nullptr &&
+ m_image_ctx.exclusive_lock->is_lock_owner()) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_rename request: "
<< payload.snap_id << " to "
<< payload.snap_name << dendl;
bool ImageWatcher::handle_payload(const SnapRemovePayload &payload,
C_NotifyAck *ack_ctx) {
RWLock::RLocker l(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_LOCKED) {
+ if (m_image_ctx.exclusive_lock != nullptr &&
+ m_image_ctx.exclusive_lock->is_lock_owner()) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_remove request: "
<< payload.snap_name << dendl;
bool ImageWatcher::handle_payload(const SnapProtectPayload& payload,
C_NotifyAck *ack_ctx) {
RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_LOCKED) {
+ if (m_image_ctx.exclusive_lock != nullptr &&
+ m_image_ctx.exclusive_lock->is_lock_owner()) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_protect request: "
<< payload.snap_name << dendl;
bool ImageWatcher::handle_payload(const SnapUnprotectPayload& payload,
C_NotifyAck *ack_ctx) {
RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_LOCKED) {
+ if (m_image_ctx.exclusive_lock != nullptr &&
+ m_image_ctx.exclusive_lock->is_lock_owner()) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_unprotect request: "
<< payload.snap_name << dendl;
bool ImageWatcher::handle_payload(const RebuildObjectMapPayload& payload,
C_NotifyAck *ack_ctx) {
RWLock::RLocker l(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_LOCKED) {
+ if (m_image_ctx.exclusive_lock != nullptr &&
+ m_image_ctx.exclusive_lock->is_lock_owner()) {
bool new_request;
Context *ctx;
ProgressContext *prog_ctx;
bool ImageWatcher::handle_payload(const RenamePayload& payload,
C_NotifyAck *ack_ctx) {
RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- if (m_lock_owner_state == LOCK_OWNER_STATE_LOCKED) {
+ if (m_image_ctx.exclusive_lock != nullptr &&
+ m_image_ctx.exclusive_lock->is_lock_owner()) {
ldout(m_image_ctx.cct, 10) << this << " remote rename request: "
<< payload.image_name << dendl;
bool ImageWatcher::handle_payload(const UnknownPayload &payload,
C_NotifyAck *ack_ctx) {
RWLock::RLocker l(m_image_ctx.owner_lock);
- if (is_lock_owner()) {
+ if (m_image_ctx.exclusive_lock != nullptr &&
+ m_image_ctx.exclusive_lock->is_lock_owner()) {
::encode(ResponseMessage(-EOPNOTSUPP), ack_ctx->out);
}
return true;
void ImageWatcher::reregister_watch() {
ldout(m_image_ctx.cct, 10) << this << " re-registering image watch" << dendl;
- RWLock::WLocker l(m_image_ctx.owner_lock);
bool was_lock_owner = false;
- if (m_lock_owner_state == LOCK_OWNER_STATE_LOCKED) {
- // ensure all async requests are canceled and IO is flushed
- was_lock_owner = release_lock();
+ C_SaferCond release_lock_ctx;
+ {
+ RWLock::WLocker l(m_image_ctx.owner_lock);
+ if (m_image_ctx.exclusive_lock != nullptr &&
+ m_image_ctx.exclusive_lock->is_lock_owner()) {
+ was_lock_owner = true;
+ m_image_ctx.exclusive_lock->release_lock(&release_lock_ctx);
+ }
}
int r;
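+  // wait for the lock release to finish before re-registering the watch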
+ if (was_lock_owner) {
+ r = release_lock_ctx.wait();
+ assert(r == 0);
+ }
+
{
RWLock::WLocker l(m_watch_lock);
if (m_watch_state != WATCH_STATE_ERROR) {
m_watch_state = WATCH_STATE_REGISTERED;
}
handle_payload(HeaderUpdatePayload(), NULL);
-
- if (was_lock_owner) {
- r = try_lock();
- if (r == -EBUSY) {
- ldout(m_image_ctx.cct, 5) << this << "lost image lock while "
- << "re-registering image watch" << dendl;
- } else if (r < 0) {
- lderr(m_image_ctx.cct) << this
- << "failed to lock image while re-registering "
- << "image watch" << cpp_strerror(r) << dendl;
- }
- }
}
void ImageWatcher::WatchCtx::handle_notify(uint64_t notify_id,
m_image_watcher.schedule_async_complete(m_async_request_id, r);
}
-void ImageWatcher::notify_listeners_updated_lock(
- LockUpdateState lock_update_state) {
- assert(m_image_ctx.owner_lock.is_locked());
-
- Listeners listeners;
- {
- Mutex::Locker listeners_locker(m_listeners_lock);
- m_listeners_in_use = true;
- listeners = m_listeners;
- }
-
- for (Listeners::iterator it = listeners.begin();
- it != listeners.end(); ++it) {
- (*it)->handle_lock_updated(lock_update_state);
- }
-
- Mutex::Locker listeners_locker(m_listeners_lock);
- m_listeners_in_use = false;
- m_listeners_cond.Signal();
-}
-
ImageWatcher::C_NotifyAck::C_NotifyAck(ImageWatcher *image_watcher,
uint64_t notify_id, uint64_t handle)
: image_watcher(image_watcher), notify_id(notify_id), handle(handle) {
}
} // namespace librbd
-
-std::ostream &operator<<(std::ostream &os,
- const librbd::ImageWatcher::LockUpdateState &state) {
- switch (state) {
- case librbd::ImageWatcher::LOCK_UPDATE_STATE_NOT_SUPPORTED:
- os << "NotSupported";
- break;
- case librbd::ImageWatcher::LOCK_UPDATE_STATE_LOCKED:
- os << "Locked";
- break;
- case librbd::ImageWatcher::LOCK_UPDATE_STATE_RELEASING:
- os << "Releasing";
- break;
- case librbd::ImageWatcher::LOCK_UPDATE_STATE_UNLOCKED:
- os << "Unlocked";
- break;
- case librbd::ImageWatcher::LOCK_UPDATE_STATE_NOTIFICATION:
- os << "Notification";
- break;
- default:
- os << "Unknown (" << static_cast<uint32_t>(state) << ")";
- break;
- }
- return os;
-}
class ImageWatcher {
public:
- enum LockUpdateState {
- LOCK_UPDATE_STATE_NOT_SUPPORTED,
- LOCK_UPDATE_STATE_LOCKED,
- LOCK_UPDATE_STATE_RELEASING,
- LOCK_UPDATE_STATE_UNLOCKED,
- LOCK_UPDATE_STATE_NOTIFICATION
- };
-
- struct Listener {
- virtual ~Listener() {}
-
- virtual bool handle_requested_lock() = 0;
- virtual void handle_lock_updated(LockUpdateState lock_update_state) = 0;
- };
-
ImageWatcher(ImageCtx& image_ctx);
~ImageWatcher();
- bool is_lock_supported() const;
- bool is_lock_supported(const RWLock &snap_lock) const;
- bool is_lock_owner() const;
-
- void register_listener(Listener *listener);
- void unregister_listener(Listener *listener);
-
int register_watch();
int unregister_watch();
- int refresh();
-
- int try_lock();
- void request_lock();
- int release_lock();
-
- void assert_header_locked(librados::ObjectWriteOperation *op);
-
int notify_flatten(uint64_t request_id, ProgressContext &prog_ctx);
int notify_resize(uint64_t request_id, uint64_t size,
ProgressContext &prog_ctx);
ProgressContext &prog_ctx);
int notify_rename(const std::string &image_name);
+ void notify_acquired_lock();
+ void notify_release_lock();
+ void notify_released_lock();
void notify_request_lock();
- void notify_lock_state();
static void notify_header_update(librados::IoCtx &io_ctx,
const std::string &oid);
-private:
-
- enum LockOwnerState {
- LOCK_OWNER_STATE_NOT_LOCKED,
- LOCK_OWNER_STATE_LOCKED,
- LOCK_OWNER_STATE_RELEASING
- };
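+  // used by ExclusiveLock to embed the watch handle in its lock cookie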
+ uint64_t get_watch_handle() const {
+ RWLock::RLocker watch_locker(m_watch_lock);
+ return m_watch_handle;
+ }
+private:
enum WatchState {
WATCH_STATE_UNREGISTERED,
WATCH_STATE_REGISTERED,
TASK_CODE_ASYNC_PROGRESS
};
- typedef std::list<Listener *> Listeners;
typedef std::pair<Context *, ProgressContext *> AsyncRequest;
class Task {
ImageCtx &m_image_ctx;
- RWLock m_watch_lock;
+ mutable RWLock m_watch_lock;
WatchCtx m_watch_ctx;
uint64_t m_watch_handle;
WatchState m_watch_state;
- Mutex m_refresh_lock;
- bool m_lock_supported;
-
- LockOwnerState m_lock_owner_state;
-
- Mutex m_listeners_lock;
- Cond m_listeners_cond;
- Listeners m_listeners;
- bool m_listeners_in_use;
-
TaskFinisher<Task> *m_task_finisher;
RWLock m_async_request_lock;
Mutex m_owner_client_id_lock;
watch_notify::ClientId m_owner_client_id;
- std::string encode_lock_cookie() const;
- static bool decode_lock_cookie(const std::string &cookie, uint64_t *handle);
-
- int get_lock_owner_info(entity_name_t *locker, std::string *cookie,
- std::string *address, uint64_t *handle);
- int lock();
- int unlock();
- bool try_request_lock();
-
void schedule_cancel_async_requests();
void cancel_async_requests();
void set_owner_client_id(const watch_notify::ClientId &client_id);
watch_notify::ClientId get_client_id();
- void notify_acquired_lock();
- void notify_release_lock();
- void notify_released_lock();
-
void schedule_request_lock(bool use_timer, int timer_delay = -1);
int notify_lock_owner(bufferlist &bl);
void acknowledge_notify(uint64_t notify_id, uint64_t handle, bufferlist &out);
void reregister_watch();
-
- void notify_listeners_updated_lock(LockUpdateState lock_update_state);
};
} // namespace librbd
-std::ostream &operator<<(std::ostream &os,
- const librbd::ImageWatcher::LockUpdateState &state);
-
#endif // CEPH_LIBRBD_IMAGE_WATCHER_H
#include "librbd/AioCompletion.h"
#include "librbd/AioImageRequestWQ.h"
#include "librbd/AioObjectRequest.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/JournalReplay.h"
#include "librbd/JournalTypes.h"
Journal::Journal(ImageCtx &image_ctx)
: m_image_ctx(image_ctx), m_journaler(NULL),
m_lock("Journal::m_lock"), m_state(STATE_UNINITIALIZED),
- m_lock_listener(this), m_replay_handler(this), m_close_pending(false),
+ m_replay_handler(this), m_close_pending(false),
m_event_lock("Journal::m_event_lock"), m_event_tid(0),
m_blocking_writes(false), m_journal_replay(NULL) {
ldout(m_image_ctx.cct, 5) << this << ": ictx=" << &m_image_ctx << dendl;
- m_image_ctx.image_watcher->register_listener(&m_lock_listener);
-
Mutex::Locker locker(m_lock);
block_writes();
}
assert(m_journal_replay == NULL);
assert(m_wait_for_state_contexts.empty());
- m_image_ctx.image_watcher->unregister_listener(&m_lock_listener);
-
Mutex::Locker locker(m_lock);
unblock_writes();
}
unblock_writes();
}
-
- // kick peers to let them know they can re-request the lock now
- m_image_ctx.image_watcher->notify_lock_state();
}
void Journal::handle_event_safe(int r, uint64_t tid) {
} else {
// send any waiting aio requests now that journal entry is safe
RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- assert(m_image_ctx.image_watcher->is_lock_owner());
+ assert(m_image_ctx.exclusive_lock->is_lock_owner());
for (AioObjectRequests::iterator it = aio_object_requests.begin();
it != aio_object_requests.end(); ++it) {
}
}
-bool Journal::handle_requested_lock() {
- Mutex::Locker locker(m_lock);
-
- CephContext *cct = m_image_ctx.cct;
- ldout(cct, 20) << this << " " << __func__ << ": " << "state=" << m_state
- << dendl;
-
- // prevent peers from taking our lock while we are replaying since that
- // will stale forward progress
- return (m_state != STATE_INITIALIZING && m_state != STATE_REPLAYING);
-}
-
-void Journal::handle_lock_updated(ImageWatcher::LockUpdateState state) {
-
- CephContext *cct = m_image_ctx.cct;
- ldout(cct, 20) << this << " " << __func__ << ": "
- << "state=" << state << dendl;
-
- Mutex::Locker locker(m_lock);
- if (state == ImageWatcher::LOCK_UPDATE_STATE_LOCKED &&
- m_state == STATE_UNINITIALIZED) {
- create_journaler();
- } else if (state == ImageWatcher::LOCK_UPDATE_STATE_RELEASING) {
- if (m_state == STATE_INITIALIZING || m_state == STATE_REPLAYING) {
- // wait for replay to successfully interrupt
- m_close_pending = true;
- wait_for_state_transition();
- }
-
- if (m_state == STATE_UNINITIALIZED || m_state == STATE_RECORDING) {
- // prevent new write ops but allow pending ops to flush to the journal
- block_writes();
- }
- if (m_state == STATE_RECORDING) {
- flush_journal();
- }
- } else if ((state == ImageWatcher::LOCK_UPDATE_STATE_NOT_SUPPORTED ||
- state == ImageWatcher::LOCK_UPDATE_STATE_UNLOCKED) &&
- m_state != STATE_UNINITIALIZED &&
- m_state != STATE_STOPPING_RECORDING) {
- assert(m_state == STATE_RECORDING);
- {
- Mutex::Locker event_locker(m_event_lock);
- assert(m_events.empty());
- }
-
- int r = stop_recording();
- if (r < 0) {
- // TODO handle failed journal writes
- assert(false);
- }
- }
-}
-
int Journal::stop_recording() {
assert(m_lock.is_locked());
assert(m_journaler != NULL);
#include "common/Cond.h"
#include "journal/Future.h"
#include "journal/ReplayHandler.h"
-#include "librbd/ImageWatcher.h"
#include <algorithm>
#include <list>
#include <string>
};
typedef ceph::unordered_map<uint64_t, Event> Events;
- struct LockListener : public ImageWatcher::Listener {
- Journal *journal;
- LockListener(Journal *_journal) : journal(_journal) {
- }
-
- virtual bool handle_requested_lock() {
- return journal->handle_requested_lock();
- }
- virtual void handle_lock_updated(ImageWatcher::LockUpdateState state) {
- journal->handle_lock_updated(state);
- }
- };
-
struct C_InitJournal : public Context {
Journal *journal;
State m_state;
Contexts m_wait_for_state_contexts;
- LockListener m_lock_listener;
ReplayHandler m_replay_handler;
bool m_close_pending;
void handle_event_safe(int r, uint64_t tid);
- bool handle_requested_lock();
- void handle_lock_updated(ImageWatcher::LockUpdateState state);
-
int stop_recording();
void block_writes();
#include "include/rbd/librbd.hpp"
#include "librbd/AioObjectRequest.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/LibrbdWriteback.h"
<< "journal committed: sending write request" << dendl;
RWLock::RLocker owner_locker(image_ctx->owner_lock);
- assert(image_ctx->image_watcher->is_lock_owner());
+ assert(image_ctx->exclusive_lock->is_lock_owner());
request_sent = true;
AioObjectWrite *req = new AioObjectWrite(image_ctx, oid, object_no, off,
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/ObjectMap.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/internal.h"
assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP));
assert(m_image_ctx.owner_lock.is_locked());
assert(m_image_ctx.image_watcher != NULL);
- assert(!m_image_ctx.image_watcher->is_lock_supported() ||
- m_image_ctx.image_watcher->is_lock_owner());
+ assert(m_image_ctx.exclusive_lock == nullptr ||
+ m_image_ctx.exclusive_lock->is_lock_owner());
object_map::ResizeRequest *req = new object_map::ResizeRequest(
m_image_ctx, &m_object_map, m_snap_id, new_size, default_object_state,
assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
assert(m_image_ctx.owner_lock.is_locked());
assert(m_image_ctx.image_watcher != NULL);
- assert(!m_image_ctx.image_watcher->is_lock_supported(m_image_ctx.snap_lock) ||
- m_image_ctx.image_watcher->is_lock_owner());
+ assert(m_image_ctx.exclusive_lock == nullptr ||
+ m_image_ctx.exclusive_lock->is_lock_owner());
assert(m_image_ctx.object_map_lock.is_wlocked());
assert(start_object_no < end_object_no);
#include "common/WorkQueue.h"
#include "librbd/AioImageRequestWQ.h"
#include "librbd/ExclusiveLock.h"
+#include "librbd/ImageWatcher.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/ImageWatcher.h"
#include "librbd/AioObjectRequest.h"
#include "librbd/CopyupRequest.h"
#include "librbd/DiffIterate.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/internal.h"
assert(ictx->owner_lock.is_locked() && !ictx->owner_lock.is_wlocked());
if (ictx->image_watcher == NULL) {
return -EROFS;
- } else if (!ictx->image_watcher->is_lock_supported() ||
- ictx->image_watcher->is_lock_owner()) {
- return 0;
}
// need to upgrade to a write lock
int r = 0;
- bool acquired_lock = false;
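+  // try to acquire the exclusive lock and wait for the attempt to complete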
+ bool trying_lock = false;
+ C_SaferCond ctx;
ictx->owner_lock.put_read();
{
- RWLock::WLocker l(ictx->owner_lock);
- if (!ictx->image_watcher->is_lock_owner()) {
- r = ictx->image_watcher->try_lock();
- acquired_lock = ictx->image_watcher->is_lock_owner();
+ RWLock::WLocker owner_locker(ictx->owner_lock);
+ if (ictx->exclusive_lock != nullptr &&
+ !ictx->exclusive_lock->is_lock_owner()) {
+ ictx->exclusive_lock->try_lock(&ctx);
+ trying_lock = true;
}
}
- if (acquired_lock) {
- // finish any AIO that was previously waiting on acquiring the
- // exclusive lock
- ictx->flush_async_operations();
+
+ if (trying_lock) {
+ r = ctx.wait();
}
ictx->owner_lock.get_read();
+
return r;
}
}
}
- while (ictx->image_watcher->is_lock_supported()) {
+ while (ictx->exclusive_lock != nullptr) {
r = prepare_image_update(ictx);
if (r < 0) {
return -EROFS;
- } else if (ictx->image_watcher->is_lock_owner()) {
+ } else if (ictx->exclusive_lock->is_lock_owner()) {
break;
}
void trim_image(ImageCtx *ictx, uint64_t newsize, ProgressContext& prog_ctx)
{
assert(ictx->owner_lock.is_locked());
- assert(!ictx->image_watcher->is_lock_supported() ||
- ictx->image_watcher->is_lock_owner());
+ assert(ictx->exclusive_lock == nullptr ||
+ ictx->exclusive_lock->is_lock_owner());
C_SaferCond ctx;
ictx->snap_lock.get_read();
void snap_create_helper(ImageCtx* ictx, Context* ctx, const char* snap_name) {
assert(ictx->owner_lock.is_locked());
- assert(!ictx->image_watcher->is_lock_supported() ||
- ictx->image_watcher->is_lock_owner());
+ assert(ictx->exclusive_lock == nullptr ||
+ ictx->exclusive_lock->is_lock_owner());
ldout(ictx->cct, 20) << "snap_create_helper " << ictx << " " << snap_name
<< dendl;
assert(ictx->owner_lock.is_locked());
{
if ((ictx->features & RBD_FEATURE_FAST_DIFF) != 0) {
- assert(!ictx->image_watcher->is_lock_supported() ||
- ictx->image_watcher->is_lock_owner());
+ assert(ictx->exclusive_lock == nullptr ||
+ ictx->exclusive_lock->is_lock_owner());
}
}
const uint64_t src_snap_id, const char* dst_name) {
assert(ictx->owner_lock.is_locked());
if ((ictx->features & RBD_FEATURE_JOURNALING) != 0) {
- assert(!ictx->image_watcher->is_lock_supported() ||
- ictx->image_watcher->is_lock_owner());
+ assert(ictx->exclusive_lock == nullptr ||
+ ictx->exclusive_lock->is_lock_owner());
}
ldout(ictx->cct, 20) << __func__ << " " << ictx << " from "
<< src_snap_id << " to " << dst_name << dendl;
{
assert(ictx->owner_lock.is_locked());
if (ictx->test_features(RBD_FEATURE_JOURNALING)) {
- assert(!ictx->image_watcher->is_lock_supported() ||
- ictx->image_watcher->is_lock_owner());
+ assert(ictx->exclusive_lock == nullptr ||
+ ictx->exclusive_lock->is_lock_owner());
}
ldout(ictx->cct, 20) << "snap_protect_helper " << ictx << " " << snap_name
{
assert(ictx->owner_lock.is_locked());
if (ictx->test_features(RBD_FEATURE_JOURNALING)) {
- assert(!ictx->image_watcher->is_lock_supported() ||
- ictx->image_watcher->is_lock_owner());
+ assert(ictx->exclusive_lock == nullptr ||
+ ictx->exclusive_lock->is_lock_owner());
}
ldout(ictx->cct, 20) << "snap_unprotect_helper " << ictx << " " << snap_name
RWLock::RLocker owner_locker(p_imctx->owner_lock);
r = ictx_refresh(p_imctx);
}
+
if (r == 0) {
p_imctx->snap_lock.get_read();
r = p_imctx->is_snap_protected(p_imctx->snap_id, &snap_protected);
{
assert(ictx->owner_lock.is_locked());
if (ictx->test_features(RBD_FEATURE_JOURNALING)) {
- assert(!ictx->image_watcher->is_lock_supported() ||
- ictx->image_watcher->is_lock_owner());
+ assert(ictx->exclusive_lock == nullptr ||
+ ictx->exclusive_lock->is_lock_owner());
}
ldout(ictx->cct, 20) << "rename_helper " << ictx << " " << dstname
int is_exclusive_lock_owner(ImageCtx *ictx, bool *is_owner)
{
RWLock::RLocker l(ictx->owner_lock);
- *is_owner = (ictx->image_watcher != NULL &&
- ictx->image_watcher->is_lock_owner());
+ *is_owner = (ictx->exclusive_lock != nullptr &&
+ ictx->exclusive_lock->is_lock_owner());
return 0;
}
id = ictx->id;
ictx->owner_lock.get_read();
- if (ictx->image_watcher->is_lock_supported()) {
+ if (ictx->exclusive_lock != nullptr) {
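+ // image removal requires ownership of the exclusive lock when the feature
+ // is enabled; bail out below if it cannot be acquired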
r = prepare_image_update(ictx);
- if (r < 0 || !ictx->image_watcher->is_lock_owner()) {
+ if (r < 0 || !ictx->exclusive_lock->is_lock_owner()) {
lderr(cct) << "cannot obtain exclusive lock - not removing" << dendl;
ictx->owner_lock.put_read();
close_image(ictx);
ProgressContext &prog_ctx)
{
assert(ictx->owner_lock.is_locked());
- assert(!ictx->image_watcher->is_lock_supported() ||
- ictx->image_watcher->is_lock_owner());
+ assert(ictx->exclusive_lock == nullptr ||
+ ictx->exclusive_lock->is_lock_owner());
CephContext *cct = ictx->cct;
ictx->snap_lock.get_read();
} // release snap_lock and cache_lock
if (ictx->image_watcher != NULL) {
- ictx->image_watcher->refresh();
+ // TODO handled by new async refresh state machine
+ //ictx->image_watcher->refresh();
}
if (new_snap) {
if (r < 0) {
return -EROFS;
}
- if (ictx->image_watcher->is_lock_supported() &&
- !ictx->image_watcher->is_lock_owner()) {
+ if (ictx->exclusive_lock != nullptr &&
+ !ictx->exclusive_lock->is_lock_owner()) {
return -EROFS;
}
if (snapshot_mode) {
{
RWLock::WLocker owner_locker(ictx->owner_lock);
- if (ictx->image_watcher != NULL &&
- ictx->image_watcher->is_lock_owner()) {
- r = ictx->image_watcher->release_lock();
- if (r < 0) {
- return r;
- }
- }
+ // TODO handled by new async set snap state machine
+ //if (ictx->image_watcher != NULL &&
+ // ictx->image_watcher->is_lock_owner()) {
+ // r = ictx->image_watcher->release_lock();
+ // if (r < 0) {
+ // return r;
+ // }
+ //}
}
ictx->cancel_async_requests();
RWLock::RLocker owner_locker(ictx->owner_lock);
if (ictx->image_watcher != NULL) {
- ictx->image_watcher->refresh();
+ // TODO handled by new async set snap state machine
+ //ictx->image_watcher->refresh();
}
return r;
}
if (!ictx->read_only) {
r = ictx->register_watch();
if (r < 0) {
- lderr(ictx->cct) << "error registering a watch: " << cpp_strerror(r)
- << dendl;
- goto err_close;
+ lderr(ictx->cct) << "error registering a watch: " << cpp_strerror(r)
+ << dendl;
+ goto err_close;
}
}
if (ictx->image_watcher != NULL) {
RWLock::RLocker owner_locker(ictx->owner_lock);
- ictx->image_watcher->refresh();
+ // TODO handled by new async open image state machine
+ //ictx->image_watcher->refresh();
}
return 0;
{
// release the lock (and flush all in-flight IO)
RWLock::WLocker owner_locker(ictx->owner_lock);
- if (ictx->image_watcher != NULL && ictx->image_watcher->is_lock_owner()) {
- r = ictx->image_watcher->release_lock();
- if (r < 0) {
- lderr(ictx->cct) << "error releasing image lock: " << cpp_strerror(r)
- << dendl;
- }
- }
+ // TODO replaced by new async close request
}
assert(!ictx->aio_work_queue->writes_blocked() ||
void async_flatten(ImageCtx *ictx, Context *ctx, ProgressContext &prog_ctx)
{
assert(ictx->owner_lock.is_locked());
- assert(!ictx->image_watcher->is_lock_supported() ||
- ictx->image_watcher->is_lock_owner());
+ assert(ictx->exclusive_lock == nullptr ||
+ ictx->exclusive_lock->is_lock_owner());
CephContext *cct = ictx->cct;
ldout(cct, 20) << "flatten" << dendl;
void async_rebuild_object_map(ImageCtx *ictx, Context *ctx,
ProgressContext &prog_ctx) {
assert(ictx->owner_lock.is_locked());
- assert(!ictx->image_watcher->is_lock_supported() ||
- ictx->image_watcher->is_lock_owner());
+ assert(ictx->exclusive_lock == nullptr ||
+ ictx->exclusive_lock->is_lock_owner());
CephContext *cct = ictx->cct;
ldout(cct, 20) << "async_rebuild_object_map " << ictx << dendl;
#include "librbd/object_map/InvalidateRequest.h"
#include "common/dout.h"
#include "common/errno.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
}
// do not update on-disk flags if not image owner
- if (image_ctx.image_watcher == NULL ||
- (image_ctx.image_watcher->is_lock_supported(image_ctx.snap_lock) &&
- !image_ctx.image_watcher->is_lock_owner() && !m_force)) {
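+ // skip the on-disk update when there is no watcher (e.g. a read-only open),
+ // or when invalidating the HEAD object map without owning the exclusive
+ // lock and the update is not forced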
+ if (image_ctx.image_watcher == nullptr ||
+ (!m_force && m_snap_id == CEPH_NOSNAP &&
+ image_ctx.exclusive_lock != nullptr &&
+ !image_ctx.exclusive_lock->is_lock_owner())) {
this->async_complete(0);
return;
}
lderr(cct) << this << " invalidating object map on-disk" << dendl;
librados::ObjectWriteOperation op;
- if (m_snap_id == CEPH_NOSNAP && !m_force) {
- image_ctx.image_watcher->assert_header_locked(&op);
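+ // only assert lock ownership for an in-place (HEAD, non-forced) update and
+ // only when the exclusive lock feature is enabled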
+ if (image_ctx.exclusive_lock != nullptr &&
+ m_snap_id == CEPH_NOSNAP && !m_force) {
+ image_ctx.exclusive_lock->assert_header_locked(&op);
}
cls_client::set_flags(&op, m_snap_id, flags, flags);
#include "librbd/operation/FlattenRequest.h"
#include "librbd/AioObjectRequest.h"
#include "librbd/AsyncObjectThrottle.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ObjectMap.h"
assert(image_ctx.owner_lock.is_locked());
CephContext *cct = image_ctx.cct;
- if (image_ctx.image_watcher->is_lock_supported() &&
- !image_ctx.image_watcher->is_lock_owner()) {
+ if (image_ctx.exclusive_lock != nullptr &&
+ !image_ctx.exclusive_lock->is_lock_owner()) {
ldout(cct, 1) << "lost exclusive lock during flatten" << dendl;
return -ERESTART;
}
m_state = STATE_UPDATE_HEADER;
// should have been canceled prior to releasing lock
- assert(!image_ctx.image_watcher->is_lock_supported() ||
- image_ctx.image_watcher->is_lock_owner());
+ assert(image_ctx.exclusive_lock == nullptr ||
+ image_ctx.exclusive_lock->is_lock_owner());
{
RWLock::RLocker parent_locker(image_ctx.parent_lock);
// remove parent from this (base) image
librados::ObjectWriteOperation op;
- if (image_ctx.image_watcher->is_lock_supported()) {
- image_ctx.image_watcher->assert_header_locked(&op);
+ if (image_ctx.exclusive_lock != nullptr) {
+ image_ctx.exclusive_lock->assert_header_locked(&op);
}
cls_client::remove_parent(&op);
CephContext *cct = image_ctx.cct;
// should have been canceled prior to releasing lock
- assert(!image_ctx.image_watcher->is_lock_supported() ||
- image_ctx.image_watcher->is_lock_owner());
+ assert(image_ctx.exclusive_lock == nullptr ||
+ image_ctx.exclusive_lock->is_lock_owner());
// if there are no snaps, remove from the children object as well
// (if snapshots remain, they have their own parent info, and the child
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AsyncObjectThrottle.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/internal.h"
CephContext *cct = image_ctx.cct;
// should have been canceled prior to releasing lock
- assert(!image_ctx.image_watcher->is_lock_supported() ||
- image_ctx.image_watcher->is_lock_owner());
+ assert(image_ctx.exclusive_lock == nullptr ||
+ image_ctx.exclusive_lock->is_lock_owner());
RWLock::WLocker l(image_ctx.object_map_lock);
uint8_t state = image_ctx.object_map[m_object_no];
m_state = STATE_RESIZE_OBJECT_MAP;
// should have been canceled prior to releasing lock
- assert(!m_image_ctx.image_watcher->is_lock_supported() ||
- m_image_ctx.image_watcher->is_lock_owner());
+ assert(m_image_ctx.exclusive_lock == nullptr ||
+ m_image_ctx.exclusive_lock->is_lock_owner());
m_image_ctx.object_map.aio_resize(size, OBJECT_NONEXISTENT,
this->create_callback_context());
}
RWLock::RLocker l(m_image_ctx.owner_lock);
// should have been canceled prior to releasing lock
- assert(!m_image_ctx.image_watcher->is_lock_supported() ||
- m_image_ctx.image_watcher->is_lock_owner());
+ assert(m_image_ctx.exclusive_lock == nullptr ||
+ m_image_ctx.exclusive_lock->is_lock_owner());
ldout(cct, 5) << this << " send_trim_image" << dendl;
m_state = STATE_TRIM_IMAGE;
m_state = STATE_SAVE_OBJECT_MAP;
// should have been canceled prior to releasing lock
- assert(!m_image_ctx.image_watcher->is_lock_supported() ||
- m_image_ctx.image_watcher->is_lock_owner());
+ assert(m_image_ctx.exclusive_lock == nullptr ||
+ m_image_ctx.exclusive_lock->is_lock_owner());
m_image_ctx.object_map.aio_save(this->create_callback_context());
}
assert(m_image_ctx.owner_lock.is_locked());
// should have been canceled prior to releasing lock
- assert(!m_image_ctx.image_watcher->is_lock_supported() ||
- m_image_ctx.image_watcher->is_lock_owner());
+ assert(m_image_ctx.exclusive_lock == nullptr ||
+ m_image_ctx.exclusive_lock->is_lock_owner());
ldout(m_image_ctx.cct, 5) << this << " send_update_header" << dendl;
m_state = STATE_UPDATE_HEADER;
librados::ObjectWriteOperation op;
- if (m_image_ctx.image_watcher->is_lock_supported()) {
- m_image_ctx.image_watcher->assert_header_locked(&op);
+ if (m_image_ctx.exclusive_lock != nullptr) {
+ m_image_ctx.exclusive_lock->assert_header_locked(&op);
}
uint64_t flags = RBD_FLAG_OBJECT_MAP_INVALID | RBD_FLAG_FAST_DIFF_INVALID;
// vim: ts=8 sw=2 smarttab
#include "librbd/operation/ResizeRequest.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/internal.h"
m_state = STATE_GROW_OBJECT_MAP;
// should have been canceled prior to releasing lock
- assert(!image_ctx.image_watcher->is_lock_supported() ||
- image_ctx.image_watcher->is_lock_owner());
+ assert(image_ctx.exclusive_lock == nullptr ||
+ image_ctx.exclusive_lock->is_lock_owner());
image_ctx.object_map.aio_resize(m_new_size, OBJECT_NONEXISTENT,
this->create_callback_context());
m_state = STATE_SHRINK_OBJECT_MAP;
// should have been canceled prior to releasing lock
- assert(!image_ctx.image_watcher->is_lock_supported() ||
- image_ctx.image_watcher->is_lock_owner());
+ assert(image_ctx.exclusive_lock == nullptr ||
+ image_ctx.exclusive_lock->is_lock_owner());
image_ctx.object_map.aio_resize(m_new_size, OBJECT_NONEXISTENT,
this->create_callback_context());
m_state = STATE_UPDATE_HEADER;
// should have been canceled prior to releasing lock
- assert(!image_ctx.image_watcher->is_lock_supported() ||
- image_ctx.image_watcher->is_lock_owner());
+ assert(image_ctx.exclusive_lock == nullptr ||
+ image_ctx.exclusive_lock->is_lock_owner());
librados::ObjectWriteOperation op;
if (image_ctx.old_format) {
bl.append(reinterpret_cast<const char*>(&m_new_size), sizeof(m_new_size));
op.write(offsetof(rbd_obj_header_ondisk, image_size), bl);
} else {
- if (image_ctx.image_watcher->is_lock_supported()) {
- image_ctx.image_watcher->assert_header_locked(&op);
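+ // for new-format images, guard the size update with a header lock
+ // assertion when the exclusive lock feature is enabled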
+ if (image_ctx.exclusive_lock != nullptr) {
+ image_ctx.exclusive_lock->assert_header_locked(&op);
}
cls_client::set_size(&op, m_new_size);
}
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AioImageRequestWQ.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ObjectMap.h"
m_state = STATE_CREATE_SNAP;
// should have been canceled prior to releasing lock
- assert(!image_ctx.image_watcher->is_lock_supported(image_ctx.snap_lock) ||
- image_ctx.image_watcher->is_lock_owner());
+ assert(image_ctx.exclusive_lock == nullptr ||
+ image_ctx.exclusive_lock->is_lock_owner());
// save current size / parent info for creating snapshot record in ImageCtx
m_size = image_ctx.size;
if (image_ctx.old_format) {
cls_client::old_snapshot_add(&op, m_snap_id, m_snap_name);
} else {
- if (image_ctx.image_watcher->is_lock_owner()) {
- image_ctx.image_watcher->assert_header_locked(&op);
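+ // guard the snapshot_add call with a header lock assertion whenever the
+ // exclusive lock feature is enabled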
+ if (image_ctx.exclusive_lock != nullptr) {
+ image_ctx.exclusive_lock->assert_header_locked(&op);
}
cls_client::snapshot_add(&op, m_snap_id, m_snap_name);
}
ldout(cct, 5) << this << " " << __func__ << dendl;
// should have been canceled prior to releasing lock
- assert(!image_ctx.image_watcher->is_lock_supported(image_ctx.snap_lock) ||
- image_ctx.image_watcher->is_lock_owner());
+ assert(image_ctx.exclusive_lock == nullptr ||
+ image_ctx.exclusive_lock->is_lock_owner());
// immediately add a reference to the new snapshot
image_ctx.add_snap(m_snap_name, m_snap_id, m_size, m_parent_info,
#include "librbd/operation/SnapshotRemoveRequest.h"
#include "common/dout.h"
#include "common/errno.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ObjectMap.h"
if (image_ctx.old_format) {
cls_client::old_snapshot_remove(&op, m_snap_name);
} else {
- if (image_ctx.image_watcher->is_lock_owner()) {
- image_ctx.image_watcher->assert_header_locked(&op);
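+ // only add the header lock assertion when we currently own the exclusive
+ // lock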
+ if (image_ctx.exclusive_lock != nullptr &&
+ image_ctx.exclusive_lock->is_lock_owner()) {
+ image_ctx.exclusive_lock->assert_header_locked(&op);
}
cls_client::snapshot_remove(&op, m_snap_id);
}
#include "librbd/operation/SnapshotRenameRequest.h"
#include "common/dout.h"
#include "common/errno.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
if (image_ctx.old_format) {
cls_client::old_snapshot_rename(&op, m_snap_id, m_snap_name);
} else {
- if (image_ctx.image_watcher->is_lock_owner()) {
- image_ctx.image_watcher->assert_header_locked(&op);
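+ // as with snapshot removal, only assert the header lock when we own the
+ // exclusive lock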
+ if (image_ctx.exclusive_lock != nullptr &&
+ image_ctx.exclusive_lock->is_lock_owner()) {
+ image_ctx.exclusive_lock->assert_header_locked(&op);
}
cls_client::snapshot_rename(&op, m_snap_id, m_snap_name);
}
#include "librbd/operation/TrimRequest.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/AioObjectRequest.h"
+#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/internal.h"
virtual int send() {
I &image_ctx = this->m_image_ctx;
assert(image_ctx.owner_lock.is_locked());
- assert(!image_ctx.image_watcher->is_lock_supported() ||
- image_ctx.image_watcher->is_lock_owner());
+ assert(image_ctx.exclusive_lock == nullptr ||
+ image_ctx.exclusive_lock->is_lock_owner());
string oid = image_ctx.get_object_name(m_object_no);
ldout(image_ctx.cct, 10) << "removing (with copyup) " << oid << dendl;
virtual int send() {
I &image_ctx = this->m_image_ctx;
assert(image_ctx.owner_lock.is_locked());
- assert(!image_ctx.image_watcher->is_lock_supported() ||
- image_ctx.image_watcher->is_lock_owner());
+ assert(image_ctx.exclusive_lock == nullptr ||
+ image_ctx.exclusive_lock->is_lock_owner());
if (!image_ctx.object_map.object_may_exist(m_object_no)) {
return 1;
}
void TrimRequest<I>::send_copyup_objects() {
I &image_ctx = this->m_image_ctx;
assert(image_ctx.owner_lock.is_locked());
- assert(!image_ctx.image_watcher->is_lock_supported() ||
- image_ctx.image_watcher->is_lock_owner());
+ assert(image_ctx.exclusive_lock == nullptr ||
+ image_ctx.exclusive_lock->is_lock_owner());
if (m_delete_start >= m_num_objects) {
send_clean_boundary();
<< " num_objects=" << m_num_objects << dendl;
m_state = STATE_PRE_REMOVE;
- assert(image_ctx.image_watcher->is_lock_owner());
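+ // object map updates imply the exclusive lock feature, so the lock
+ // instance is expected to be valid on this path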
+ assert(image_ctx.exclusive_lock->is_lock_owner());
// flag the objects as pending deletion
Context *ctx = this->create_callback_context();
<< " num_objects=" << m_num_objects << dendl;
m_state = STATE_POST_REMOVE;
- assert(image_ctx.image_watcher->is_lock_owner());
+ assert(image_ctx.exclusive_lock->is_lock_owner());
// flag the pending objects as removed
Context *ctx = this->create_callback_context();
}
// should have been canceled prior to releasing lock
- assert(!image_ctx.image_watcher->is_lock_supported() ||
- image_ctx.image_watcher->is_lock_owner());
+ assert(image_ctx.exclusive_lock == nullptr ||
+ image_ctx.exclusive_lock->is_lock_owner());
uint64_t delete_len = m_delete_off - m_new_size;
ldout(image_ctx.cct, 5) << this << " send_clean_boundary: "
<< " delete_off=" << m_delete_off