From: Kefu Chai Date: Sun, 7 Jul 2019 04:38:40 +0000 (+0800) Subject: librbd: s/Mutex/ceph::mutex/ X-Git-Tag: v15.1.0~1971^2~38 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=a79837bcee5358ded3074a7c1b470737fd7c50e3;p=ceph.git librbd: s/Mutex/ceph::mutex/ Signed-off-by: Kefu Chai --- diff --git a/src/librbd/AsyncObjectThrottle.cc b/src/librbd/AsyncObjectThrottle.cc index b6dbcb261399..c62c84539404 100644 --- a/src/librbd/AsyncObjectThrottle.cc +++ b/src/librbd/AsyncObjectThrottle.cc @@ -15,7 +15,8 @@ AsyncObjectThrottle::AsyncObjectThrottle( const AsyncRequest* async_request, T &image_ctx, const ContextFactory& context_factory, Context *ctx, ProgressContext *prog_ctx, uint64_t object_no, uint64_t end_object_no) - : m_lock(util::unique_lock_name("librbd::AsyncThrottle::m_lock", this)), + : m_lock(ceph::make_mutex( + util::unique_lock_name("librbd::AsyncThrottle::m_lock", this))), m_async_request(async_request), m_image_ctx(image_ctx), m_context_factory(context_factory), m_ctx(ctx), m_prog_ctx(prog_ctx), m_object_no(object_no), m_end_object_no(end_object_no), m_current_ops(0), @@ -25,10 +26,10 @@ AsyncObjectThrottle::AsyncObjectThrottle( template void AsyncObjectThrottle::start_ops(uint64_t max_concurrent) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); bool complete; { - Mutex::Locker l(m_lock); + std::lock_guard l{m_lock}; for (uint64_t i = 0; i < max_concurrent; ++i) { start_next_op(); if (m_ret < 0 && m_current_ops == 0) { @@ -48,8 +49,8 @@ template void AsyncObjectThrottle::finish_op(int r) { bool complete; { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); - Mutex::Locker locker(m_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; + std::lock_guard locker{m_lock}; --m_current_ops; if (r < 0 && r != -ENOENT && m_ret == 0) { m_ret = r; diff --git a/src/librbd/AsyncObjectThrottle.h b/src/librbd/AsyncObjectThrottle.h index 
e1b089626c34..64397f9e4b42 100644 --- a/src/librbd/AsyncObjectThrottle.h +++ b/src/librbd/AsyncObjectThrottle.h @@ -58,7 +58,7 @@ public: void finish_op(int r) override; private: - Mutex m_lock; + ceph::mutex m_lock; const AsyncRequest *m_async_request; ImageCtxT &m_image_ctx; ContextFactory m_context_factory; diff --git a/src/librbd/AsyncRequest.cc b/src/librbd/AsyncRequest.cc index 8a76a226474d..67ea116a1515 100644 --- a/src/librbd/AsyncRequest.cc +++ b/src/librbd/AsyncRequest.cc @@ -43,7 +43,7 @@ Context *AsyncRequest::create_async_callback_context() { template void AsyncRequest::start_request() { - Mutex::Locker async_ops_locker(m_image_ctx.async_ops_lock); + std::lock_guard async_ops_locker{m_image_ctx.async_ops_lock}; m_image_ctx.async_requests.push_back(&m_xlist_item); } @@ -51,7 +51,7 @@ template void AsyncRequest::finish_request() { decltype(m_image_ctx.async_requests_waiters) waiters; { - Mutex::Locker async_ops_locker(m_image_ctx.async_ops_lock); + std::lock_guard async_ops_locker{m_image_ctx.async_ops_lock}; ceph_assert(m_xlist_item.remove_myself()); if (m_image_ctx.async_requests.empty()) { diff --git a/src/librbd/BlockGuard.h b/src/librbd/BlockGuard.h index 062f1901d61b..1b59ab788755 100644 --- a/src/librbd/BlockGuard.h +++ b/src/librbd/BlockGuard.h @@ -6,7 +6,7 @@ #include "include/int_types.h" #include "common/dout.h" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include #include #include @@ -48,7 +48,7 @@ public: typedef std::list BlockOperations; BlockGuard(CephContext *cct) - : m_cct(cct), m_lock("librbd::BlockGuard::m_lock") { + : m_cct(cct) { } BlockGuard(const BlockGuard&) = delete; @@ -63,7 +63,7 @@ public: */ int detain(const BlockExtent &block_extent, BlockOperation *block_operation, BlockGuardCell **cell) { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ldout(m_cct, 20) << "block_start=" << block_extent.block_start << ", " << "block_end=" << block_extent.block_end << ", " << "free_slots=" << 
m_free_detained_block_extents.size() @@ -104,7 +104,7 @@ public: * Release any detained IO operations from the provided cell. */ void release(BlockGuardCell *cell, BlockOperations *block_operations) { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(cell != nullptr); auto &detained_block_extent = reinterpret_cast( @@ -158,7 +158,7 @@ private: CephContext *m_cct; - Mutex m_lock; + ceph::mutex m_lock = ceph::make_mutex("librbd::BlockGuard::m_lock"); DetainedBlockExtentsPool m_detained_block_extent_pool; DetainedBlockExtents m_free_detained_block_extents; BlockExtentToDetainedBlockExtents m_detained_block_extents; diff --git a/src/librbd/DeepCopyRequest.cc b/src/librbd/DeepCopyRequest.cc index 07f4e79044ea..ddbd34a76118 100644 --- a/src/librbd/DeepCopyRequest.cc +++ b/src/librbd/DeepCopyRequest.cc @@ -39,7 +39,7 @@ DeepCopyRequest::DeepCopyRequest(I *src_image_ctx, I *dst_image_ctx, m_object_number(object_number), m_work_queue(work_queue), m_snap_seqs(snap_seqs), m_prog_ctx(prog_ctx), m_on_finish(on_finish), m_cct(dst_image_ctx->cct), - m_lock(unique_lock_name("DeepCopyRequest::m_lock", this)) { + m_lock(ceph::make_mutex(unique_lock_name("DeepCopyRequest::m_lock", this))) { } template @@ -61,7 +61,7 @@ void DeepCopyRequest::send() { template void DeepCopyRequest::cancel() { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ldout(m_cct, 20) << dendl; @@ -78,9 +78,9 @@ void DeepCopyRequest::cancel() { template void DeepCopyRequest::send_copy_snapshots() { - m_lock.Lock(); + m_lock.lock(); if (m_canceled) { - m_lock.Unlock(); + m_lock.unlock(); finish(-ECANCELED); return; } @@ -93,7 +93,7 @@ void DeepCopyRequest::send_copy_snapshots() { m_src_image_ctx, m_dst_image_ctx, m_snap_id_end, m_flatten, m_work_queue, m_snap_seqs, ctx); m_snapshot_copy_request->get(); - m_lock.Unlock(); + m_lock.unlock(); m_snapshot_copy_request->send(); } @@ -103,7 +103,7 @@ void DeepCopyRequest::handle_copy_snapshots(int r) { ldout(m_cct, 20) << "r=" 
<< r << dendl; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; m_snapshot_copy_request->put(); m_snapshot_copy_request = nullptr; if (r == 0 && m_canceled) { @@ -131,9 +131,9 @@ void DeepCopyRequest::handle_copy_snapshots(int r) { template void DeepCopyRequest::send_copy_image() { - m_lock.Lock(); + m_lock.lock(); if (m_canceled) { - m_lock.Unlock(); + m_lock.unlock(); finish(-ECANCELED); return; } @@ -146,7 +146,7 @@ void DeepCopyRequest::send_copy_image() { m_src_image_ctx, m_dst_image_ctx, m_snap_id_start, m_snap_id_end, m_flatten, m_object_number, *m_snap_seqs, m_prog_ctx, ctx); m_image_copy_request->get(); - m_lock.Unlock(); + m_lock.unlock(); m_image_copy_request->send(); } @@ -156,7 +156,7 @@ void DeepCopyRequest::handle_copy_image(int r) { ldout(m_cct, 20) << "r=" << r << dendl; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; m_image_copy_request->put(); m_image_copy_request = nullptr; if (r == 0 && m_canceled) { @@ -179,19 +179,19 @@ void DeepCopyRequest::handle_copy_image(int r) { template void DeepCopyRequest::send_copy_object_map() { - m_dst_image_ctx->owner_lock.get_read(); - m_dst_image_ctx->image_lock.get_read(); + m_dst_image_ctx->owner_lock.lock_shared(); + m_dst_image_ctx->image_lock.lock_shared(); if (!m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP, m_dst_image_ctx->image_lock)) { - m_dst_image_ctx->image_lock.put_read(); - m_dst_image_ctx->owner_lock.put_read(); + m_dst_image_ctx->image_lock.unlock_shared(); + m_dst_image_ctx->owner_lock.unlock_shared(); send_copy_metadata(); return; } if (m_snap_id_end == CEPH_NOSNAP) { - m_dst_image_ctx->image_lock.put_read(); - m_dst_image_ctx->owner_lock.put_read(); + m_dst_image_ctx->image_lock.unlock_shared(); + m_dst_image_ctx->owner_lock.unlock_shared(); send_refresh_object_map(); return; } @@ -207,8 +207,8 @@ void DeepCopyRequest::send_copy_object_map() { } if (finish_op_ctx == nullptr) { lderr(m_cct) << "lost exclusive lock" << dendl; - 
m_dst_image_ctx->image_lock.put_read(); - m_dst_image_ctx->owner_lock.put_read(); + m_dst_image_ctx->image_lock.unlock_shared(); + m_dst_image_ctx->owner_lock.unlock_shared(); finish(r); return; } @@ -221,8 +221,8 @@ void DeepCopyRequest::send_copy_object_map() { ceph_assert(m_snap_seqs->count(m_snap_id_end) > 0); librados::snap_t copy_snap_id = (*m_snap_seqs)[m_snap_id_end]; m_dst_image_ctx->object_map->rollback(copy_snap_id, ctx); - m_dst_image_ctx->image_lock.put_read(); - m_dst_image_ctx->owner_lock.put_read(); + m_dst_image_ctx->image_lock.unlock_shared(); + m_dst_image_ctx->owner_lock.unlock_shared(); } template @@ -244,7 +244,7 @@ void DeepCopyRequest::send_refresh_object_map() { int r; Context *finish_op_ctx = nullptr; { - RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock); + std::shared_lock owner_locker{m_dst_image_ctx->owner_lock}; if (m_dst_image_ctx->exclusive_lock != nullptr) { finish_op_ctx = m_dst_image_ctx->exclusive_lock->start_op(&r); } @@ -279,7 +279,7 @@ void DeepCopyRequest::handle_refresh_object_map(int r) { } { - RWLock::WLocker image_locker(m_dst_image_ctx->image_lock); + std::unique_lock image_locker{m_dst_image_ctx->image_lock}; std::swap(m_dst_image_ctx->object_map, m_object_map); } delete m_object_map; @@ -313,7 +313,7 @@ void DeepCopyRequest::handle_copy_metadata(int r) { template int DeepCopyRequest::validate_copy_points() { - RWLock::RLocker image_locker(m_src_image_ctx->image_lock); + std::shared_lock image_locker{m_src_image_ctx->image_lock}; if (m_snap_id_start != 0 && m_src_image_ctx->snap_info.find(m_snap_id_start) == diff --git a/src/librbd/DeepCopyRequest.h b/src/librbd/DeepCopyRequest.h index dba07e6a862d..77b87ddc94fd 100644 --- a/src/librbd/DeepCopyRequest.h +++ b/src/librbd/DeepCopyRequest.h @@ -4,7 +4,7 @@ #ifndef CEPH_LIBRBD_DEEP_COPY_REQUEST_H #define CEPH_LIBRBD_DEEP_COPY_REQUEST_H -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "common/RefCountedObj.h" #include "include/int_types.h" #include 
"librbd/ImageCtx.h" @@ -96,7 +96,7 @@ private: Context *m_on_finish; CephContext *m_cct; - Mutex m_lock; + ceph::mutex m_lock; bool m_canceled = false; deep_copy::SnapshotCopyRequest *m_snapshot_copy_request = nullptr; diff --git a/src/librbd/ExclusiveLock.cc b/src/librbd/ExclusiveLock.cc index 71d98c5b2e64..3d3ae93ca3fb 100644 --- a/src/librbd/ExclusiveLock.cc +++ b/src/librbd/ExclusiveLock.cc @@ -10,7 +10,7 @@ #include "librbd/exclusive_lock/PreReleaseRequest.h" #include "librbd/io/ImageRequestWQ.h" #include "librbd/Utils.h" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "common/dout.h" #define dout_subsys ceph_subsys_rbd @@ -32,13 +32,13 @@ ExclusiveLock::ExclusiveLock(I &image_ctx) image_ctx.config.template get_val("rbd_blacklist_on_break_lock"), image_ctx.config.template get_val("rbd_blacklist_expire_seconds")), m_image_ctx(image_ctx) { - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; ML::set_state_uninitialized(); } template bool ExclusiveLock::accept_requests(int *ret_val) const { - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; bool accept_requests = (!ML::is_state_shutdown() && ML::is_state_locked() && @@ -53,21 +53,21 @@ bool ExclusiveLock::accept_requests(int *ret_val) const { template bool ExclusiveLock::accept_ops() const { - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; bool accept = accept_ops(ML::m_lock); ldout(m_image_ctx.cct, 20) << "=" << accept << dendl; return accept; } template -bool ExclusiveLock::accept_ops(const Mutex &lock) const { +bool ExclusiveLock::accept_ops(const ceph::mutex &lock) const { return (!ML::is_state_shutdown() && (ML::is_state_locked() || ML::is_state_post_acquiring())); } template void ExclusiveLock::block_requests(int r) { - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; m_request_blocked_count++; if (m_request_blocked_ret_val == 0) { @@ -79,7 +79,7 @@ void ExclusiveLock::block_requests(int r) { 
template void ExclusiveLock::unblock_requests() { - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; ceph_assert(m_request_blocked_count > 0); m_request_blocked_count--; @@ -100,11 +100,11 @@ int ExclusiveLock::get_unlocked_op_error() const { template void ExclusiveLock::init(uint64_t features, Context *on_init) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ldout(m_image_ctx.cct, 10) << dendl; { - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; ML::set_state_initializing(); } @@ -124,7 +124,7 @@ void ExclusiveLock::shut_down(Context *on_shut_down) { template void ExclusiveLock::handle_peer_notification(int r) { - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; if (!ML::is_state_waiting_for_lock()) { return; } @@ -138,8 +138,8 @@ void ExclusiveLock::handle_peer_notification(int r) { template Context *ExclusiveLock::start_op(int* ret_val) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); - Mutex::Locker locker(ML::m_lock); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); + std::lock_guard locker{ML::m_lock}; if (!accept_ops(ML::m_lock)) { *ret_val = get_unlocked_op_error(); @@ -157,7 +157,7 @@ void ExclusiveLock::handle_init_complete(uint64_t features) { ldout(m_image_ctx.cct, 10) << ": features=" << features << dendl; { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.clone_copy_on_read || (features & RBD_FEATURE_JOURNALING) != 0) { m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, true); @@ -166,7 +166,7 @@ void ExclusiveLock::handle_init_complete(uint64_t features) { } } - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; ML::set_state_unlocked(); } @@ -175,7 +175,7 @@ void ExclusiveLock::shutdown_handler(int r, Context *on_finish) { ldout(m_image_ctx.cct, 10) << dendl; { - RWLock::WLocker 
owner_locker(m_image_ctx.owner_lock); + std::unique_lock owner_locker{m_image_ctx.owner_lock}; m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false); m_image_ctx.exclusive_lock = nullptr; } @@ -190,7 +190,7 @@ void ExclusiveLock::pre_acquire_lock_handler(Context *on_finish) { int acquire_lock_peer_ret_val = 0; { - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val); } @@ -216,7 +216,7 @@ void ExclusiveLock::post_acquire_lock_handler(int r, Context *on_finish) { on_finish->complete(r); return; } else if (r < 0) { - ML::m_lock.Lock(); + ML::m_lock.lock(); ceph_assert(ML::is_state_acquiring()); // PostAcquire state machine will not run, so we need complete prepare @@ -225,7 +225,7 @@ void ExclusiveLock::post_acquire_lock_handler(int r, Context *on_finish) { // if lock is in-use by another client, request the lock if (ML::is_action_acquire_lock() && (r == -EBUSY || r == -EAGAIN)) { ML::set_state_waiting_for_lock(); - ML::m_lock.Unlock(); + ML::m_lock.unlock(); // request the lock from a peer m_image_ctx.image_watcher->notify_request_lock(); @@ -233,7 +233,7 @@ void ExclusiveLock::post_acquire_lock_handler(int r, Context *on_finish) { // inform manage lock that we have interrupted the state machine r = -ECANCELED; } else { - ML::m_lock.Unlock(); + ML::m_lock.unlock(); // clear error if peer owns lock if (r == -EAGAIN) { @@ -245,7 +245,7 @@ void ExclusiveLock::post_acquire_lock_handler(int r, Context *on_finish) { return; } - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; m_pre_post_callback = on_finish; using EL = ExclusiveLock; PostAcquireRequest *req = PostAcquireRequest::create(m_image_ctx, @@ -261,7 +261,7 @@ template void ExclusiveLock::handle_post_acquiring_lock(int r) { ldout(m_image_ctx.cct, 10) << dendl; - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; ceph_assert(r == 0); @@ -275,7 +275,7 @@ void 
ExclusiveLock::handle_post_acquired_lock(int r) { Context *on_finish = nullptr; { - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; ceph_assert(ML::is_state_acquiring() || ML::is_state_post_acquiring()); assert (m_pre_post_callback != nullptr); @@ -297,7 +297,7 @@ template void ExclusiveLock::pre_release_lock_handler(bool shutting_down, Context *on_finish) { ldout(m_image_ctx.cct, 10) << dendl; - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; PreReleaseRequest *req = PreReleaseRequest::create( m_image_ctx, shutting_down, m_async_op_tracker, on_finish); @@ -313,7 +313,7 @@ void ExclusiveLock::post_release_lock_handler(bool shutting_down, int r, << shutting_down << dendl; if (!shutting_down) { { - Mutex::Locker locker(ML::m_lock); + std::lock_guard locker{ML::m_lock}; ceph_assert(ML::is_state_pre_releasing() || ML::is_state_releasing()); } @@ -322,7 +322,7 @@ void ExclusiveLock::post_release_lock_handler(bool shutting_down, int r, } } else { { - RWLock::WLocker owner_locker(m_image_ctx.owner_lock); + std::unique_lock owner_locker{m_image_ctx.owner_lock}; m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false); m_image_ctx.exclusive_lock = nullptr; } diff --git a/src/librbd/ExclusiveLock.h b/src/librbd/ExclusiveLock.h index 8b2a411f4423..233d632a216d 100644 --- a/src/librbd/ExclusiveLock.h +++ b/src/librbd/ExclusiveLock.h @@ -94,7 +94,7 @@ private: int m_acquire_lock_peer_ret_val = 0; - bool accept_ops(const Mutex &lock) const; + bool accept_ops(const ceph::mutex &lock) const; void handle_init_complete(uint64_t features); void handle_post_acquiring_lock(int r); diff --git a/src/librbd/ImageCtx.cc b/src/librbd/ImageCtx.cc index 8c5a07eacaf5..b5e0390b3654 100644 --- a/src/librbd/ImageCtx.cc +++ b/src/librbd/ImageCtx.cc @@ -76,15 +76,14 @@ public: class SafeTimerSingleton : public SafeTimer { public: - Mutex lock; + ceph::mutex lock = ceph::make_mutex("librbd::Journal::SafeTimerSingleton::lock"); explicit 
SafeTimerSingleton(CephContext *cct) - : SafeTimer(cct, lock, true), - lock("librbd::Journal::SafeTimerSingleton::lock") { + : SafeTimer(cct, lock, true) { init(); } ~SafeTimerSingleton() { - Mutex::Locker locker(lock); + std::lock_guard locker{lock}; shutdown(); } }; @@ -105,11 +104,11 @@ public: name(image_name), image_watcher(NULL), journal(NULL), - owner_lock(util::unique_lock_name("librbd::ImageCtx::owner_lock", this)), - image_lock(util::unique_lock_name("librbd::ImageCtx::image_lock", this)), - timestamp_lock(util::unique_lock_name("librbd::ImageCtx::timestamp_lock", this)), - async_ops_lock(util::unique_lock_name("librbd::ImageCtx::async_ops_lock", this)), - copyup_list_lock(util::unique_lock_name("librbd::ImageCtx::copyup_list_lock", this)), + owner_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ImageCtx::owner_lock", this))), + image_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ImageCtx::image_lock", this))), + timestamp_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ImageCtx::timestamp_lock", this))), + async_ops_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageCtx::async_ops_lock", this))), + copyup_list_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageCtx::copyup_list_lock", this))), extra_read_flags(0), old_format(false), order(0), size(0), features(0), @@ -319,7 +318,7 @@ public: } int ImageCtx::snap_set(uint64_t in_snap_id) { - ceph_assert(image_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_wlocked(image_lock)); auto it = snap_info.find(in_snap_id); if (in_snap_id != CEPH_NOSNAP && it != snap_info.end()) { snap_id = in_snap_id; @@ -334,7 +333,7 @@ public: void ImageCtx::snap_unset() { - ceph_assert(image_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_wlocked(image_lock)); snap_id = CEPH_NOSNAP; snap_namespace = {}; snap_name = ""; @@ -345,7 +344,7 @@ public: snap_t ImageCtx::get_snap_id(const cls::rbd::SnapshotNamespace& in_snap_namespace, const string& in_snap_name) const { - 
ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); auto it = snap_ids.find({in_snap_namespace, in_snap_name}); if (it != snap_ids.end()) { return it->second; @@ -355,7 +354,7 @@ public: const SnapInfo* ImageCtx::get_snap_info(snap_t in_snap_id) const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); map::const_iterator it = snap_info.find(in_snap_id); if (it != snap_info.end()) @@ -366,7 +365,7 @@ public: int ImageCtx::get_snap_name(snap_t in_snap_id, string *out_snap_name) const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); const SnapInfo *info = get_snap_info(in_snap_id); if (info) { *out_snap_name = info->name; @@ -378,7 +377,7 @@ public: int ImageCtx::get_snap_namespace(snap_t in_snap_id, cls::rbd::SnapshotNamespace *out_snap_namespace) const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); const SnapInfo *info = get_snap_info(in_snap_id); if (info) { *out_snap_namespace = info->snap_namespace; @@ -400,7 +399,7 @@ public: uint64_t ImageCtx::get_current_size() const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); return size; } @@ -445,20 +444,20 @@ public: void ImageCtx::set_access_timestamp(utime_t at) { - ceph_assert(timestamp_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_wlocked(timestamp_lock)); access_timestamp = at; } void ImageCtx::set_modify_timestamp(utime_t mt) { - ceph_assert(timestamp_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(timestamp_lock)); modify_timestamp = mt; } int ImageCtx::is_snap_protected(snap_t in_snap_id, bool *is_protected) const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); const SnapInfo *info = get_snap_info(in_snap_id); if (info) { *is_protected = @@ -471,7 +470,7 @@ public: int ImageCtx::is_snap_unprotected(snap_t in_snap_id, bool *is_unprotected) const { - 
ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); const SnapInfo *info = get_snap_info(in_snap_id); if (info) { *is_unprotected = @@ -488,7 +487,7 @@ public: uint8_t protection_status, uint64_t flags, utime_t timestamp) { - ceph_assert(image_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_wlocked(image_lock)); snaps.push_back(id); SnapInfo info(in_snap_name, in_snap_namespace, in_size, parent, protection_status, flags, timestamp); @@ -500,7 +499,7 @@ public: string in_snap_name, snap_t id) { - ceph_assert(image_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_wlocked(image_lock)); snaps.erase(std::remove(snaps.begin(), snaps.end(), id), snaps.end()); snap_info.erase(id); snap_ids.erase({in_snap_namespace, in_snap_name}); @@ -508,7 +507,7 @@ public: uint64_t ImageCtx::get_image_size(snap_t in_snap_id) const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); if (in_snap_id == CEPH_NOSNAP) { if (!resize_reqs.empty() && resize_reqs.front()->shrinking()) { @@ -525,40 +524,40 @@ public: } uint64_t ImageCtx::get_object_count(snap_t in_snap_id) const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); uint64_t image_size = get_image_size(in_snap_id); return Striper::get_num_objects(layout, image_size); } bool ImageCtx::test_features(uint64_t features) const { - RWLock::RLocker l(image_lock); + std::shared_lock l{image_lock}; return test_features(features, image_lock); } bool ImageCtx::test_features(uint64_t in_features, - const RWLock &in_image_lock) const + const ceph::shared_mutex &in_image_lock) const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); return ((features & in_features) == in_features); } bool ImageCtx::test_op_features(uint64_t in_op_features) const { - RWLock::RLocker image_locker(image_lock); + std::shared_lock l{image_lock}; return test_op_features(in_op_features, image_lock); } bool 
ImageCtx::test_op_features(uint64_t in_op_features, - const RWLock &in_image_lock) const + const ceph::shared_mutex &in_image_lock) const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); return ((op_features & in_op_features) == in_op_features); } int ImageCtx::get_flags(librados::snap_t _snap_id, uint64_t *_flags) const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); if (_snap_id == CEPH_NOSNAP) { *_flags = flags; return 0; @@ -574,15 +573,16 @@ public: int ImageCtx::test_flags(librados::snap_t in_snap_id, uint64_t flags, bool *flags_set) const { - RWLock::RLocker l(image_lock); + std::shared_lock l{image_lock}; return test_flags(in_snap_id, flags, image_lock, flags_set); } int ImageCtx::test_flags(librados::snap_t in_snap_id, - uint64_t flags, const RWLock &in_image_lock, + uint64_t flags, + const ceph::shared_mutex &in_image_lock, bool *flags_set) const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); uint64_t snap_flags; int r = get_flags(in_snap_id, &snap_flags); if (r < 0) { @@ -594,7 +594,7 @@ public: int ImageCtx::update_flags(snap_t in_snap_id, uint64_t flag, bool enabled) { - ceph_assert(image_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_wlocked(image_lock)); uint64_t *_flags; if (in_snap_id == CEPH_NOSNAP) { _flags = &flags; @@ -616,7 +616,7 @@ public: const ParentImageInfo* ImageCtx::get_parent_info(snap_t in_snap_id) const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); if (in_snap_id == CEPH_NOSNAP) return &parent_md; const SnapInfo *info = get_snap_info(in_snap_id); @@ -651,7 +651,7 @@ public: int ImageCtx::get_parent_overlap(snap_t in_snap_id, uint64_t *overlap) const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); const auto info = get_parent_info(in_snap_id); if (info) { *overlap = info->overlap; @@ -695,7 +695,7 @@ public: void 
ImageCtx::cancel_async_requests(Context *on_finish) { { - Mutex::Locker async_ops_locker(async_ops_lock); + std::lock_guard async_ops_locker{async_ops_lock}; if (!async_requests.empty()) { ldout(cct, 10) << "canceling async requests: count=" << async_requests.size() << dendl; @@ -823,8 +823,8 @@ public: void ImageCtx::set_image_name(const std::string &image_name) { // update the name so rename can be invoked repeatedly - RWLock::RLocker owner_locker(owner_lock); - RWLock::WLocker image_locker(image_lock); + std::shared_lock owner_locker{owner_lock}; + std::unique_lock image_locker{image_lock}; name = image_name; if (old_format) { header_oid = util::old_header_name(image_name); @@ -842,26 +842,26 @@ public: } exclusive_lock::Policy *ImageCtx::get_exclusive_lock_policy() const { - ceph_assert(owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(owner_lock)); ceph_assert(exclusive_lock_policy != nullptr); return exclusive_lock_policy; } void ImageCtx::set_exclusive_lock_policy(exclusive_lock::Policy *policy) { - ceph_assert(owner_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_wlocked(owner_lock)); ceph_assert(policy != nullptr); delete exclusive_lock_policy; exclusive_lock_policy = policy; } journal::Policy *ImageCtx::get_journal_policy() const { - ceph_assert(image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_lock)); ceph_assert(journal_policy != nullptr); return journal_policy; } void ImageCtx::set_journal_policy(journal::Policy *policy) { - ceph_assert(image_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_wlocked(image_lock)); ceph_assert(policy != nullptr); delete journal_policy; journal_policy = policy; @@ -878,7 +878,7 @@ public: } void ImageCtx::get_timer_instance(CephContext *cct, SafeTimer **timer, - Mutex **timer_lock) { + ceph::mutex **timer_lock) { auto safe_timer_singleton = &cct->lookup_or_create_singleton_object( "librbd::journal::safe_timer", false, cct); diff --git a/src/librbd/ImageCtx.h b/src/librbd/ImageCtx.h index 
2ae215fcab19..84a5dbec2edf 100644 --- a/src/librbd/ImageCtx.h +++ b/src/librbd/ImageCtx.h @@ -13,11 +13,10 @@ #include #include "common/allocator.h" +#include "common/ceph_mutex.h" #include "common/config_proxy.h" #include "common/event_socket.h" -#include "common/Mutex.h" #include "common/Readahead.h" -#include "common/RWLock.h" #include "common/snap_types.h" #include "common/zipkin_trace.h" @@ -105,8 +104,8 @@ namespace librbd { * owner_lock, image_lock * async_op_lock, timestamp_lock */ - RWLock owner_lock; // protects exclusive lock leadership updates - RWLock image_lock; // protects snapshot-related member variables, + ceph::shared_mutex owner_lock; // protects exclusive lock leadership updates + mutable ceph::shared_mutex image_lock; // protects snapshot-related member variables, // features (and associated helper classes), and flags // protects access to the mutable image metadata that // isn't guarded by other locks below, and blocks writes @@ -119,9 +118,9 @@ namespace librbd { // object_map // parent_md and parent - RWLock timestamp_lock; // protects (create/access/modify)_timestamp - Mutex async_ops_lock; // protects async_ops and async_requests - Mutex copyup_list_lock; // protects copyup_waiting_list + ceph::shared_mutex timestamp_lock; // protects (create/access/modify)_timestamp + ceph::mutex async_ops_lock; // protects async_ops and async_requests + ceph::mutex copyup_list_lock; // protects copyup_waiting_list unsigned extra_read_flags; @@ -281,15 +280,15 @@ namespace librbd { uint64_t get_object_count(librados::snap_t in_snap_id) const; bool test_features(uint64_t test_features) const; bool test_features(uint64_t test_features, - const RWLock &in_image_lock) const; + const ceph::shared_mutex &in_image_lock) const; bool test_op_features(uint64_t op_features) const; bool test_op_features(uint64_t op_features, - const RWLock &in_image_lock) const; + const ceph::shared_mutex &in_image_lock) const; int get_flags(librados::snap_t in_snap_id, uint64_t 
*flags) const; int test_flags(librados::snap_t in_snap_id, uint64_t test_flags, bool *flags_set) const; int test_flags(librados::snap_t in_snap_id, - uint64_t test_flags, const RWLock &in_image_lock, + uint64_t test_flags, const ceph::shared_mutex &in_image_lock, bool *flags_set) const; int update_flags(librados::snap_t in_snap_id, uint64_t flag, bool enabled); @@ -328,7 +327,7 @@ namespace librbd { ThreadPool **thread_pool, ContextWQ **op_work_queue); static void get_timer_instance(CephContext *cct, SafeTimer **timer, - Mutex **timer_lock); + ceph::mutex **timer_lock); }; } diff --git a/src/librbd/ImageState.cc b/src/librbd/ImageState.cc index 1f0535e25c82..acdfaeea9cde 100644 --- a/src/librbd/ImageState.cc +++ b/src/librbd/ImageState.cc @@ -27,7 +27,7 @@ class ImageUpdateWatchers { public: explicit ImageUpdateWatchers(CephContext *cct) : m_cct(cct), - m_lock(util::unique_lock_name("librbd::ImageUpdateWatchers::m_lock", this)) { + m_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageUpdateWatchers::m_lock", this))) { } ~ImageUpdateWatchers() { @@ -42,7 +42,7 @@ public: void flush(Context *on_finish) { ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (!m_in_flight.empty()) { Context *ctx = new FunctionContext( [this, on_finish](int r) { @@ -62,7 +62,7 @@ public: void shut_down(Context *on_finish) { ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_on_shut_down_finish == nullptr); m_watchers.clear(); if (!m_in_flight.empty()) { @@ -78,7 +78,7 @@ public: void register_watcher(UpdateWatchCtx *watcher, uint64_t *handle) { ldout(m_cct, 20) << __func__ << ": watcher=" << watcher << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_on_shut_down_finish == nullptr); create_work_queue(); @@ -92,7 +92,7 @@ public: << handle << dendl; int r = 0; { - 
Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; auto it = m_watchers.find(handle); if (it == m_watchers.end()) { r = -ENOENT; @@ -116,14 +116,14 @@ public: void notify() { ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; for (auto it : m_watchers) { send_notify(it.first, it.second); } } void send_notify(uint64_t handle, UpdateWatchCtx *watcher) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": handle=" << handle << ", watcher=" << watcher << dendl; @@ -149,7 +149,7 @@ public: Context *on_shut_down_finish = nullptr; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; auto in_flight_it = m_in_flight.find(handle); ceph_assert(in_flight_it != m_in_flight.end()); @@ -200,7 +200,7 @@ private: }; CephContext *m_cct; - Mutex m_lock; + ceph::mutex m_lock; ContextWQ *m_work_queue = nullptr; std::map m_watchers; uint64_t m_next_handle = 0; @@ -232,7 +232,7 @@ private: template ImageState::ImageState(I *image_ctx) : m_image_ctx(image_ctx), m_state(STATE_UNINITIALIZED), - m_lock(util::unique_lock_name("librbd::ImageState::m_lock", this)), + m_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageState::m_lock", this))), m_last_refresh(0), m_refresh_seq(0), m_update_watchers(new ImageUpdateWatchers(image_ctx->cct)) { } @@ -260,7 +260,7 @@ void ImageState::open(uint64_t flags, Context *on_finish) { CephContext *cct = m_image_ctx->cct; ldout(cct, 20) << __func__ << dendl; - m_lock.Lock(); + m_lock.lock(); ceph_assert(m_state == STATE_UNINITIALIZED); m_open_flags = flags; @@ -285,7 +285,7 @@ void ImageState::close(Context *on_finish) { CephContext *cct = m_image_ctx->cct; ldout(cct, 20) << __func__ << dendl; - m_lock.Lock(); + m_lock.lock(); ceph_assert(!is_closed()); Action action(ACTION_TYPE_CLOSE); @@ -295,7 +295,7 @@ void ImageState::close(Context *on_finish) { 
template void ImageState::handle_update_notification() { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ++m_refresh_seq; CephContext *cct = m_image_ctx->cct; @@ -309,7 +309,7 @@ void ImageState::handle_update_notification() { template bool ImageState::is_refresh_required() const { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; return (m_last_refresh != m_refresh_seq || find_pending_refresh() != nullptr); } @@ -325,9 +325,9 @@ void ImageState::refresh(Context *on_finish) { CephContext *cct = m_image_ctx->cct; ldout(cct, 20) << __func__ << dendl; - m_lock.Lock(); + m_lock.lock(); if (is_closed()) { - m_lock.Unlock(); + m_lock.unlock(); on_finish->complete(-ESHUTDOWN); return; } @@ -341,7 +341,7 @@ template int ImageState::refresh_if_required() { C_SaferCond ctx; { - m_lock.Lock(); + m_lock.lock(); Action action(ACTION_TYPE_REFRESH); action.refresh_seq = m_refresh_seq; @@ -350,10 +350,10 @@ int ImageState::refresh_if_required() { // if a refresh is in-flight, delay until it is finished action = *refresh_action; } else if (m_last_refresh == m_refresh_seq) { - m_lock.Unlock(); + m_lock.unlock(); return 0; } else if (is_closed()) { - m_lock.Unlock(); + m_lock.unlock(); return -ESHUTDOWN; } @@ -366,7 +366,7 @@ int ImageState::refresh_if_required() { template const typename ImageState::Action * ImageState::find_pending_refresh() const { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); auto it = std::find_if(m_actions_contexts.rbegin(), m_actions_contexts.rend(), @@ -387,7 +387,7 @@ void ImageState::snap_set(uint64_t snap_id, Context *on_finish) { Action action(ACTION_TYPE_SET_SNAP); action.snap_id = snap_id; - m_lock.Lock(); + m_lock.lock(); execute_action_unlock(action, on_finish); } @@ -396,9 +396,9 @@ void ImageState::prepare_lock(Context *on_ready) { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << __func__ << dendl; - m_lock.Lock(); + m_lock.lock(); if (is_closed()) { - m_lock.Unlock(); + 
m_lock.unlock(); on_ready->complete(-ESHUTDOWN); return; } @@ -413,9 +413,9 @@ void ImageState::handle_prepare_lock_complete() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << __func__ << dendl; - m_lock.Lock(); + m_lock.lock(); if (m_state != STATE_PREPARING_LOCK) { - m_lock.Unlock(); + m_lock.unlock(); return; } @@ -479,7 +479,7 @@ bool ImageState::is_transition_state() const { template bool ImageState::is_closed() const { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); return ((m_state == STATE_CLOSED) || (!m_actions_contexts.empty() && @@ -488,7 +488,7 @@ bool ImageState::is_closed() const { template void ImageState::append_context(const Action &action, Context *context) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ActionContexts *action_contexts = nullptr; for (auto &action_ctxs : m_actions_contexts) { @@ -510,7 +510,7 @@ void ImageState::append_context(const Action &action, Context *context) { template void ImageState::execute_next_action_unlock() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(!m_actions_contexts.empty()); switch (m_actions_contexts.front().first.action_type) { case ACTION_TYPE_OPEN: @@ -535,44 +535,44 @@ void ImageState::execute_next_action_unlock() { template void ImageState::execute_action_unlock(const Action &action, Context *on_finish) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); append_context(action, on_finish); if (!is_transition_state()) { execute_next_action_unlock(); } else { - m_lock.Unlock(); + m_lock.unlock(); } } template void ImageState::complete_action_unlock(State next_state, int r) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(!m_actions_contexts.empty()); ActionContexts action_contexts(std::move(m_actions_contexts.front())); m_actions_contexts.pop_front(); m_state = next_state; - m_lock.Unlock(); + m_lock.unlock(); 
for (auto ctx : action_contexts.second) { ctx->complete(r); } if (next_state != STATE_UNINITIALIZED && next_state != STATE_CLOSED) { - m_lock.Lock(); + m_lock.lock(); if (!is_transition_state() && !m_actions_contexts.empty()) { execute_next_action_unlock(); } else { - m_lock.Unlock(); + m_lock.unlock(); } } } template void ImageState::send_open_unlock() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; @@ -584,7 +584,7 @@ void ImageState::send_open_unlock() { image::OpenRequest *req = image::OpenRequest::create( m_image_ctx, m_open_flags, ctx); - m_lock.Unlock(); + m_lock.unlock(); req->send(); } @@ -597,13 +597,13 @@ void ImageState::handle_open(int r) { lderr(cct) << "failed to open image: " << cpp_strerror(r) << dendl; } - m_lock.Lock(); + m_lock.lock(); complete_action_unlock(r < 0 ? STATE_UNINITIALIZED : STATE_OPEN, r); } template void ImageState::send_close_unlock() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; @@ -614,7 +614,7 @@ void ImageState::send_close_unlock() { image::CloseRequest *req = image::CloseRequest::create( m_image_ctx, ctx); - m_lock.Unlock(); + m_lock.unlock(); req->send(); } @@ -628,13 +628,13 @@ void ImageState::handle_close(int r) { << dendl; } - m_lock.Lock(); + m_lock.lock(); complete_action_unlock(STATE_CLOSED, r); } template void ImageState::send_refresh_unlock() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; @@ -649,7 +649,7 @@ void ImageState::send_refresh_unlock() { image::RefreshRequest *req = image::RefreshRequest::create( *m_image_ctx, false, false, ctx); - m_lock.Unlock(); + m_lock.unlock(); req->send(); } @@ -658,7 +658,7 @@ void ImageState::handle_refresh(int r) 
{ CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; - m_lock.Lock(); + m_lock.lock(); ceph_assert(!m_actions_contexts.empty()); ActionContexts &action_contexts(m_actions_contexts.front()); @@ -677,7 +677,7 @@ void ImageState::handle_refresh(int r) { template void ImageState::send_set_snap_unlock() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); m_state = STATE_SETTING_SNAP; @@ -695,7 +695,7 @@ void ImageState::send_set_snap_unlock() { image::SetSnapRequest *req = image::SetSnapRequest::create( *m_image_ctx, action_contexts.first.snap_id, ctx); - m_lock.Unlock(); + m_lock.unlock(); req->send(); } @@ -708,7 +708,7 @@ void ImageState::handle_set_snap(int r) { lderr(cct) << "failed to set snapshot: " << cpp_strerror(r) << dendl; } - m_lock.Lock(); + m_lock.lock(); complete_action_unlock(STATE_OPEN, r); } @@ -717,7 +717,7 @@ void ImageState::send_prepare_lock_unlock() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); m_state = STATE_PREPARING_LOCK; ceph_assert(!m_actions_contexts.empty()); @@ -725,7 +725,7 @@ void ImageState::send_prepare_lock_unlock() { ceph_assert(action_contexts.first.action_type == ACTION_TYPE_LOCK); Context *on_ready = action_contexts.first.on_ready; - m_lock.Unlock(); + m_lock.unlock(); if (on_ready == nullptr) { complete_action_unlock(STATE_OPEN, 0); diff --git a/src/librbd/ImageState.h b/src/librbd/ImageState.h index 7f28d1eec712..9daa5137ed9e 100644 --- a/src/librbd/ImageState.h +++ b/src/librbd/ImageState.h @@ -5,7 +5,7 @@ #define CEPH_LIBRBD_IMAGE_STATE_H #include "include/int_types.h" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include #include #include @@ -102,7 +102,7 @@ private: ImageCtxT *m_image_ctx; State m_state; - mutable Mutex m_lock; + mutable ceph::mutex m_lock; ActionsContexts m_actions_contexts; uint64_t 
m_last_refresh; diff --git a/src/librbd/ImageWatcher.cc b/src/librbd/ImageWatcher.cc index 055aec196a14..da5a39fdf845 100644 --- a/src/librbd/ImageWatcher.cc +++ b/src/librbd/ImageWatcher.cc @@ -68,8 +68,10 @@ ImageWatcher::ImageWatcher(I &image_ctx) : Watcher(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid), m_image_ctx(image_ctx), m_task_finisher(new TaskFinisher(*m_image_ctx.cct)), - m_async_request_lock(util::unique_lock_name("librbd::ImageWatcher::m_async_request_lock", this)), - m_owner_client_id_lock(util::unique_lock_name("librbd::ImageWatcher::m_owner_client_id_lock", this)) + m_async_request_lock(ceph::make_shared_mutex( + util::unique_lock_name("librbd::ImageWatcher::m_async_request_lock", this))), + m_owner_client_id_lock(ceph::make_mutex( + util::unique_lock_name("librbd::ImageWatcher::m_owner_client_id_lock", this))) { } @@ -156,7 +158,7 @@ void ImageWatcher::handle_async_complete(const AsyncRequestId &request, schedule_async_complete(request, r); } } else { - RWLock::WLocker async_request_locker(m_async_request_lock); + std::unique_lock async_request_locker{m_async_request_lock}; m_async_pending.erase(request); } } @@ -165,7 +167,7 @@ template void ImageWatcher::notify_flatten(uint64_t request_id, ProgressContext &prog_ctx, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); @@ -180,7 +182,7 @@ void ImageWatcher::notify_resize(uint64_t request_id, uint64_t size, bool allow_shrink, ProgressContext &prog_ctx, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); @@ -195,7 +197,7 @@ template void ImageWatcher::notify_snap_create(const cls::rbd::SnapshotNamespace &snap_namespace, const std::string 
&snap_name, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); @@ -206,7 +208,7 @@ template void ImageWatcher::notify_snap_rename(const snapid_t &src_snap_id, const std::string &dst_snap_name, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); @@ -217,7 +219,7 @@ template void ImageWatcher::notify_snap_remove(const cls::rbd::SnapshotNamespace &snap_namespace, const std::string &snap_name, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); @@ -228,7 +230,7 @@ template void ImageWatcher::notify_snap_protect(const cls::rbd::SnapshotNamespace &snap_namespace, const std::string &snap_name, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); @@ -239,7 +241,7 @@ template void ImageWatcher::notify_snap_unprotect(const cls::rbd::SnapshotNamespace &snap_namespace, const std::string &snap_name, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); @@ -250,7 +252,7 @@ template void ImageWatcher::notify_rebuild_object_map(uint64_t request_id, ProgressContext &prog_ctx, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock 
&& !m_image_ctx.exclusive_lock->is_lock_owner()); @@ -264,7 +266,7 @@ void ImageWatcher::notify_rebuild_object_map(uint64_t request_id, template void ImageWatcher::notify_rename(const std::string &image_name, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); @@ -274,7 +276,7 @@ void ImageWatcher::notify_rename(const std::string &image_name, template void ImageWatcher::notify_update_features(uint64_t features, bool enabled, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); @@ -285,7 +287,7 @@ template void ImageWatcher::notify_migrate(uint64_t request_id, ProgressContext &prog_ctx, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); @@ -299,7 +301,7 @@ template void ImageWatcher::notify_sparsify(uint64_t request_id, size_t sparse_size, ProgressContext &prog_ctx, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); @@ -336,7 +338,7 @@ void ImageWatcher::schedule_cancel_async_requests() { template void ImageWatcher::cancel_async_requests() { - RWLock::WLocker l(m_async_request_lock); + std::unique_lock l{m_async_request_lock}; for (std::map::iterator iter = m_async_requests.begin(); iter != m_async_requests.end(); ++iter) { @@ -347,7 +349,7 @@ void ImageWatcher::cancel_async_requests() { template void ImageWatcher::set_owner_client_id(const ClientId& client_id) { - 
ceph_assert(m_owner_client_id_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_owner_client_id_lock)); m_owner_client_id = client_id; ldout(m_image_ctx.cct, 10) << this << " current lock owner: " << m_owner_client_id << dendl; @@ -355,7 +357,7 @@ void ImageWatcher::set_owner_client_id(const ClientId& client_id) { template ClientId ImageWatcher::get_client_id() { - RWLock::RLocker l(this->m_watch_lock); + std::shared_lock l{this->m_watch_lock}; return ClientId(m_image_ctx.md_ctx.get_instance_id(), this->m_watch_handle); } @@ -365,7 +367,7 @@ void ImageWatcher::notify_acquired_lock() { ClientId client_id = get_client_id(); { - Mutex::Locker owner_client_id_locker(m_owner_client_id_lock); + std::lock_guard owner_client_id_locker{m_owner_client_id_lock}; set_owner_client_id(client_id); } @@ -377,7 +379,7 @@ void ImageWatcher::notify_released_lock() { ldout(m_image_ctx.cct, 10) << this << " notify released lock" << dendl; { - Mutex::Locker owner_client_id_locker(m_owner_client_id_lock); + std::lock_guard owner_client_id_locker{m_owner_client_id_lock}; set_owner_client_id(ClientId()); } @@ -386,7 +388,7 @@ void ImageWatcher::notify_released_lock() { template void ImageWatcher::schedule_request_lock(bool use_timer, int timer_delay) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); if (m_image_ctx.exclusive_lock == nullptr) { // exclusive lock dynamically disabled via image refresh @@ -395,7 +397,7 @@ void ImageWatcher::schedule_request_lock(bool use_timer, int timer_delay) { ceph_assert(m_image_ctx.exclusive_lock && !m_image_ctx.exclusive_lock->is_lock_owner()); - RWLock::RLocker watch_locker(this->m_watch_lock); + std::shared_lock watch_locker{this->m_watch_lock}; if (this->is_registered(this->m_watch_lock)) { ldout(m_image_ctx.cct, 15) << this << " requesting exclusive lock" << dendl; @@ -415,8 +417,8 @@ void ImageWatcher::schedule_request_lock(bool use_timer, int timer_delay) { template void 
ImageWatcher::notify_request_lock() { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; + std::shared_lock image_locker{m_image_ctx.image_lock}; // ExclusiveLock state machine can be dynamically disabled or // race with task cancel @@ -434,8 +436,8 @@ void ImageWatcher::notify_request_lock() { template void ImageWatcher::handle_request_lock(int r) { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; + std::shared_lock image_locker{m_image_ctx.image_lock}; // ExclusiveLock state machine cannot transition -- but can be // dynamically disabled @@ -470,7 +472,7 @@ template void ImageWatcher::notify_lock_owner(const Payload& payload, Context *on_finish) { ceph_assert(on_finish != nullptr); - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); bufferlist bl; encode(NotifyMessage(payload), bl); @@ -482,7 +484,7 @@ void ImageWatcher::notify_lock_owner(const Payload& payload, template Context *ImageWatcher::remove_async_request(const AsyncRequestId &id) { - RWLock::WLocker async_request_locker(m_async_request_lock); + std::unique_lock async_request_locker{m_async_request_lock}; auto it = m_async_requests.find(id); if (it != m_async_requests.end()) { Context *on_complete = it->second.first; @@ -523,7 +525,7 @@ void ImageWatcher::notify_async_request(const AsyncRequestId &async_request_i ProgressContext& prog_ctx, Context *on_finish) { ceph_assert(on_finish != nullptr); - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ldout(m_image_ctx.cct, 10) << this << " async request: " << async_request_id << dendl; @@ -545,7 +547,7 @@ void ImageWatcher::notify_async_request(const AsyncRequestId &async_request_i }); { - RWLock::WLocker 
async_request_locker(m_async_request_lock); + std::unique_lock async_request_locker{m_async_request_lock}; m_async_requests[async_request_id] = AsyncRequest(on_complete, &prog_ctx); } @@ -560,7 +562,7 @@ int ImageWatcher::prepare_async_request(const AsyncRequestId& async_request_i if (async_request_id.client_id == get_client_id()) { return -ERESTART; } else { - RWLock::WLocker l(m_async_request_lock); + std::unique_lock l{m_async_request_lock}; if (m_async_pending.count(async_request_id) == 0) { m_async_pending.insert(async_request_id); *new_request = true; @@ -595,14 +597,14 @@ bool ImageWatcher::handle_payload(const AcquiredLockPayload &payload, bool cancel_async_requests = true; if (payload.client_id.is_valid()) { - Mutex::Locker owner_client_id_locker(m_owner_client_id_lock); + std::lock_guard owner_client_id_locker{m_owner_client_id_lock}; if (payload.client_id == m_owner_client_id) { cancel_async_requests = false; } set_owner_client_id(payload.client_id); } - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { // potentially wake up the exclusive lock state machine now that // a lock owner has advertised itself @@ -623,7 +625,7 @@ bool ImageWatcher::handle_payload(const ReleasedLockPayload &payload, bool cancel_async_requests = true; if (payload.client_id.is_valid()) { - Mutex::Locker l(m_owner_client_id_lock); + std::lock_guard l{m_owner_client_id_lock}; if (payload.client_id != m_owner_client_id) { ldout(m_image_ctx.cct, 10) << this << " unexpected owner: " << payload.client_id << " != " @@ -634,7 +636,7 @@ bool ImageWatcher::handle_payload(const ReleasedLockPayload &payload, } } - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; if (cancel_async_requests && (m_image_ctx.exclusive_lock == nullptr || !m_image_ctx.exclusive_lock->is_lock_owner())) { @@ -658,7 +660,7 @@ bool 
ImageWatcher::handle_payload(const RequestLockPayload &payload, return true; } - RWLock::RLocker l(m_image_ctx.owner_lock); + std::shared_lock l{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr && m_image_ctx.exclusive_lock->is_lock_owner()) { int r = 0; @@ -666,7 +668,7 @@ bool ImageWatcher::handle_payload(const RequestLockPayload &payload, if (accept_request) { ceph_assert(r == 0); - Mutex::Locker owner_client_id_locker(m_owner_client_id_lock); + std::lock_guard owner_client_id_locker{m_owner_client_id_lock}; if (!m_owner_client_id.is_valid()) { return true; } @@ -684,7 +686,7 @@ bool ImageWatcher::handle_payload(const RequestLockPayload &payload, template bool ImageWatcher::handle_payload(const AsyncProgressPayload &payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker l(m_async_request_lock); + std::shared_lock l{m_async_request_lock}; std::map::iterator req_it = m_async_requests.find(payload.async_request_id); if (req_it != m_async_requests.end()) { @@ -715,7 +717,7 @@ template bool ImageWatcher::handle_payload(const FlattenPayload &payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker l(m_image_ctx.owner_lock); + std::shared_lock l{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { int r; if (m_image_ctx.exclusive_lock->accept_requests(&r)) { @@ -741,7 +743,7 @@ bool ImageWatcher::handle_payload(const FlattenPayload &payload, template bool ImageWatcher::handle_payload(const ResizePayload &payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker l(m_image_ctx.owner_lock); + std::shared_lock l{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { int r; if (m_image_ctx.exclusive_lock->accept_requests(&r)) { @@ -769,7 +771,7 @@ bool ImageWatcher::handle_payload(const ResizePayload &payload, template bool ImageWatcher::handle_payload(const SnapCreatePayload &payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker l(m_image_ctx.owner_lock); + std::shared_lock l{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) 
{ int r; if (m_image_ctx.exclusive_lock->accept_requests(&r)) { @@ -791,7 +793,7 @@ bool ImageWatcher::handle_payload(const SnapCreatePayload &payload, template bool ImageWatcher::handle_payload(const SnapRenamePayload &payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker l(m_image_ctx.owner_lock); + std::shared_lock l{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { int r; if (m_image_ctx.exclusive_lock->accept_requests(&r)) { @@ -813,7 +815,7 @@ bool ImageWatcher::handle_payload(const SnapRenamePayload &payload, template bool ImageWatcher::handle_payload(const SnapRemovePayload &payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker l(m_image_ctx.owner_lock); + std::shared_lock l{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { int r; if (m_image_ctx.exclusive_lock->accept_requests(&r)) { @@ -834,7 +836,7 @@ bool ImageWatcher::handle_payload(const SnapRemovePayload &payload, template bool ImageWatcher::handle_payload(const SnapProtectPayload& payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { int r; if (m_image_ctx.exclusive_lock->accept_requests(&r)) { @@ -855,7 +857,7 @@ bool ImageWatcher::handle_payload(const SnapProtectPayload& payload, template bool ImageWatcher::handle_payload(const SnapUnprotectPayload& payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { int r; if (m_image_ctx.exclusive_lock->accept_requests(&r)) { @@ -876,7 +878,7 @@ bool ImageWatcher::handle_payload(const SnapUnprotectPayload& payload, template bool ImageWatcher::handle_payload(const RebuildObjectMapPayload& payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker l(m_image_ctx.owner_lock); + std::shared_lock l{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { int 
r; if (m_image_ctx.exclusive_lock->accept_requests(&r)) { @@ -903,7 +905,7 @@ bool ImageWatcher::handle_payload(const RebuildObjectMapPayload& payload, template bool ImageWatcher::handle_payload(const RenamePayload& payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { int r; if (m_image_ctx.exclusive_lock->accept_requests(&r)) { @@ -923,7 +925,7 @@ bool ImageWatcher::handle_payload(const RenamePayload& payload, template bool ImageWatcher::handle_payload(const UpdateFeaturesPayload& payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { int r; if (m_image_ctx.exclusive_lock->accept_requests(&r)) { @@ -946,7 +948,7 @@ template bool ImageWatcher::handle_payload(const MigratePayload &payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker l(m_image_ctx.owner_lock); + std::shared_lock l{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { int r; if (m_image_ctx.exclusive_lock->accept_requests(&r)) { @@ -972,7 +974,7 @@ bool ImageWatcher::handle_payload(const MigratePayload &payload, template bool ImageWatcher::handle_payload(const SparsifyPayload &payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker l(m_image_ctx.owner_lock); + std::shared_lock l{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { int r; if (m_image_ctx.exclusive_lock->accept_requests(&r)) { @@ -999,7 +1001,7 @@ bool ImageWatcher::handle_payload(const SparsifyPayload &payload, template bool ImageWatcher::handle_payload(const UnknownPayload &payload, C_NotifyAck *ack_ctx) { - RWLock::RLocker l(m_image_ctx.owner_lock); + std::shared_lock l{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { int r; if (m_image_ctx.exclusive_lock->accept_requests(&r) || r < 0) { @@ -1050,7 +1052,7 @@ void 
ImageWatcher::handle_error(uint64_t handle, int err) { << cpp_strerror(err) << dendl; { - Mutex::Locker l(m_owner_client_id_lock); + std::lock_guard l{m_owner_client_id_lock}; set_owner_client_id(ClientId()); } @@ -1063,7 +1065,7 @@ void ImageWatcher::handle_rewatch_complete(int r) { ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr) { // update the lock cookie with the new watch handle m_image_ctx.exclusive_lock->reacquire_lock(nullptr); diff --git a/src/librbd/ImageWatcher.h b/src/librbd/ImageWatcher.h index 441a7d696ad3..1ae54d80bfd4 100644 --- a/src/librbd/ImageWatcher.h +++ b/src/librbd/ImageWatcher.h @@ -5,8 +5,7 @@ #define CEPH_LIBRBD_IMAGE_WATCHER_H #include "cls/rbd/cls_rbd_types.h" -#include "common/Mutex.h" -#include "common/RWLock.h" +#include "common/ceph_mutex.h" #include "include/Context.h" #include "include/rbd/librbd.hpp" #include "librbd/Watcher.h" @@ -165,11 +164,11 @@ private: TaskFinisher *m_task_finisher; - RWLock m_async_request_lock; + ceph::shared_mutex m_async_request_lock; std::map m_async_requests; std::set m_async_pending; - Mutex m_owner_client_id_lock; + ceph::mutex m_owner_client_id_lock; watch_notify::ClientId m_owner_client_id; void handle_register_watch(int r); diff --git a/src/librbd/Journal.cc b/src/librbd/Journal.cc index 66abdf902073..b73a240e2316 100644 --- a/src/librbd/Journal.cc +++ b/src/librbd/Journal.cc @@ -134,13 +134,13 @@ struct GetTagsRequest { journal::TagData *tag_data; Context *on_finish; - Mutex lock; + ceph::mutex lock = ceph::make_mutex("lock"); GetTagsRequest(CephContext *cct, J *journaler, cls::journal::Client *client, journal::ImageClientMeta *client_meta, uint64_t *tag_tid, journal::TagData *tag_data, Context *on_finish) : cct(cct), journaler(journaler), client(client), client_meta(client_meta), - tag_tid(tag_tid), tag_data(tag_data), 
on_finish(on_finish), lock("lock") { + tag_tid(tag_tid), tag_data(tag_data), on_finish(on_finish) { } /** @@ -327,9 +327,9 @@ std::ostream &operator<<(std::ostream &os, template Journal::Journal(I &image_ctx) : m_image_ctx(image_ctx), m_journaler(NULL), - m_lock("Journal::m_lock"), m_state(STATE_UNINITIALIZED), + m_state(STATE_UNINITIALIZED), m_error_result(0), m_replay_handler(this), m_close_pending(false), - m_event_lock("Journal::m_event_lock"), m_event_tid(0), + m_event_tid(0), m_blocking_writes(false), m_journal_replay(NULL), m_metadata_listener(this) { @@ -360,7 +360,7 @@ Journal::~Journal() { template bool Journal::is_journal_supported(I &image_ctx) { - ceph_assert(image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock)); return ((image_ctx.features & RBD_FEATURE_JOURNALING) && !image_ctx.read_only && image_ctx.snap_id == CEPH_NOSNAP); } @@ -462,7 +462,7 @@ int Journal::request_resync(I *image_ctx) { Journaler journaler(image_ctx->md_ctx, image_ctx->id, IMAGE_CLIENT_ID, {}, nullptr); - Mutex lock("lock"); + ceph::mutex lock = ceph::make_mutex("lock"); journal::ImageClientMeta client_meta; uint64_t tag_tid; journal::TagData tag_data; @@ -521,19 +521,19 @@ void Journal::demote(I *image_ctx, Context *on_finish) { template bool Journal::is_journal_ready() const { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; return (m_state == STATE_READY); } template bool Journal::is_journal_replaying() const { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; return is_journal_replaying(m_lock); } template -bool Journal::is_journal_replaying(const Mutex &) const { - ceph_assert(m_lock.is_locked()); +bool Journal::is_journal_replaying(const ceph::mutex &) const { + ceph_assert(ceph_mutex_is_locked(m_lock)); return (m_state == STATE_REPLAYING || m_state == STATE_FLUSHING_REPLAY || m_state == STATE_FLUSHING_RESTART || @@ -542,8 +542,8 @@ bool Journal::is_journal_replaying(const Mutex &) const { template bool 
Journal::is_journal_appending() const { - ceph_assert(m_image_ctx.image_lock.is_locked()); - Mutex::Locker locker(m_lock); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); + std::lock_guard locker{m_lock}; return (m_state == STATE_READY && !m_image_ctx.get_journal_policy()->append_disabled()); } @@ -552,7 +552,7 @@ template void Journal::wait_for_journal_ready(Context *on_ready) { on_ready = create_async_context_callback(m_image_ctx, on_ready); - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (m_state == STATE_READY) { on_ready->complete(m_error_result); } else { @@ -571,7 +571,7 @@ void Journal::open(Context *on_finish) { m_image_ctx.io_object_dispatcher->register_object_dispatch( journal::ObjectDispatch::create(&m_image_ctx, this)); - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_UNINITIALIZED); wait_for_steady_state(on_finish); create_journaler(); @@ -592,21 +592,19 @@ void Journal::close(Context *on_finish) { }); on_finish = create_async_context_callback(m_image_ctx, on_finish); - Mutex::Locker locker(m_lock); - while (m_listener_notify) { - m_listener_cond.Wait(m_lock); - } + std::unique_lock locker{m_lock}; + m_listener_cond.wait(locker, [this] { return !m_listener_notify; }); Listeners listeners(m_listeners); m_listener_notify = true; - m_lock.Unlock(); + m_lock.unlock(); for (auto listener : listeners) { listener->handle_close(); } - m_lock.Lock(); + m_lock.lock(); m_listener_notify = false; - m_listener_cond.Signal(); + m_listener_cond.notify_all(); ceph_assert(m_state != STATE_UNINITIALIZED); if (m_state == STATE_CLOSED) { @@ -624,25 +622,25 @@ void Journal::close(Context *on_finish) { template bool Journal::is_tag_owner() const { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; return is_tag_owner(m_lock); } template -bool Journal::is_tag_owner(const Mutex &) const { - ceph_assert(m_lock.is_locked()); +bool Journal::is_tag_owner(const ceph::mutex &) const { + 
ceph_assert(ceph_mutex_is_locked(m_lock)); return (m_tag_data.mirror_uuid == LOCAL_MIRROR_UUID); } template uint64_t Journal::get_tag_tid() const { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; return m_tag_tid; } template journal::TagData Journal::get_tag_data() const { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; return m_tag_data; } @@ -654,7 +652,7 @@ void Journal::allocate_local_tag(Context *on_finish) { journal::TagPredecessor predecessor; predecessor.mirror_uuid = LOCAL_MIRROR_UUID; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_journaler != nullptr && is_tag_owner(m_lock)); cls::journal::Client client; @@ -688,7 +686,7 @@ void Journal::allocate_tag(const std::string &mirror_uuid, ldout(cct, 20) << this << " " << __func__ << ": mirror_uuid=" << mirror_uuid << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_journaler != nullptr); journal::TagData tag_data; @@ -709,7 +707,7 @@ void Journal::flush_commit_position(Context *on_finish) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_journaler != nullptr); m_journaler->flush_commit_position(on_finish); } @@ -718,7 +716,7 @@ template void Journal::user_flushed() { if (m_state == STATE_READY && !m_user_flushed.exchange(true) && m_image_ctx.config.template get_val("rbd_journal_object_writethrough_until_flush")) { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (m_state == STATE_READY) { CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -787,7 +785,7 @@ uint64_t Journal::append_io_events(journal::EventType event_type, uint64_t tid; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_READY); tid = ++m_event_tid; @@ -801,7 +799,7 @@ uint64_t 
Journal::append_io_events(journal::EventType event_type, } { - Mutex::Locker event_locker(m_event_lock); + std::lock_guard event_locker{m_event_lock}; m_events[tid] = Event(futures, offset, length, filter_ret_val); } @@ -829,7 +827,7 @@ void Journal::commit_io_event(uint64_t tid, int r) { ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", " "r=" << r << dendl; - Mutex::Locker event_locker(m_event_lock); + std::lock_guard event_locker{m_event_lock}; typename Events::iterator it = m_events.find(tid); if (it == m_events.end()) { return; @@ -848,7 +846,7 @@ void Journal::commit_io_event_extent(uint64_t tid, uint64_t offset, << "length=" << length << ", " << "r=" << r << dendl; - Mutex::Locker event_locker(m_event_lock); + std::lock_guard event_locker{m_event_lock}; typename Events::iterator it = m_events.find(tid); if (it == m_events.end()) { return; @@ -878,7 +876,7 @@ template void Journal::append_op_event(uint64_t op_tid, journal::EventEntry &&event_entry, Context *on_safe) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); bufferlist bl; event_entry.timestamp = ceph_clock_now(); @@ -886,7 +884,7 @@ void Journal::append_op_event(uint64_t op_tid, Future future; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_READY); future = m_journaler->append(m_tag_tid, bl); @@ -924,7 +922,7 @@ void Journal::commit_op_event(uint64_t op_tid, int r, Context *on_safe) { Future op_start_future; Future op_finish_future; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_READY); // ready to commit op event @@ -947,7 +945,7 @@ void Journal::replay_op_ready(uint64_t op_tid, Context *on_resume) { ldout(cct, 10) << this << " " << __func__ << ": op_tid=" << op_tid << dendl; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_journal_replay != nullptr); 
m_journal_replay->replay_op_ready(op_tid, on_resume); } @@ -961,7 +959,7 @@ void Journal::flush_event(uint64_t tid, Context *on_safe) { Future future; { - Mutex::Locker event_locker(m_event_lock); + std::lock_guard event_locker{m_event_lock}; future = wait_event(m_lock, tid, on_safe); } @@ -976,14 +974,14 @@ void Journal::wait_event(uint64_t tid, Context *on_safe) { ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", " << "on_safe=" << on_safe << dendl; - Mutex::Locker event_locker(m_event_lock); + std::lock_guard event_locker{m_event_lock}; wait_event(m_lock, tid, on_safe); } template -typename Journal::Future Journal::wait_event(Mutex &lock, uint64_t tid, +typename Journal::Future Journal::wait_event(ceph::mutex &lock, uint64_t tid, Context *on_safe) { - ceph_assert(m_event_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_event_lock)); CephContext *cct = m_image_ctx.cct; typename Events::iterator it = m_events.find(tid); @@ -1009,7 +1007,7 @@ void Journal::start_external_replay(journal::Replay **journal_replay, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_READY); ceph_assert(m_journal_replay == nullptr); @@ -1031,7 +1029,7 @@ void Journal::handle_start_external_replay(int r, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_READY); ceph_assert(m_journal_replay == nullptr); @@ -1057,7 +1055,7 @@ void Journal::stop_external_replay() { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_journal_replay != nullptr); ceph_assert(m_state == STATE_REPLAYING); @@ -1077,7 +1075,7 @@ void Journal::create_journaler() { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << 
this << " " << __func__ << dendl; - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_RESTARTING_REPLAY); ceph_assert(m_journaler == NULL); @@ -1112,7 +1110,7 @@ void Journal::destroy_journaler(int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl; - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); delete m_journal_replay; m_journal_replay = NULL; @@ -1126,7 +1124,7 @@ void Journal::destroy_journaler(int r) { Journal, &Journal::handle_journal_destroyed>(this)); ctx = new FunctionContext( [this, ctx](int r) { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; m_journaler->shut_down(ctx); }); m_async_journal_op_tracker.wait(m_image_ctx, ctx); @@ -1137,7 +1135,7 @@ void Journal::recreate_journaler(int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl; - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(m_state == STATE_FLUSHING_RESTART || m_state == STATE_FLUSHING_REPLAY); @@ -1154,7 +1152,7 @@ void Journal::recreate_journaler(int r) { template void Journal::complete_event(typename Events::iterator it, int r) { - ceph_assert(m_event_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_event_lock)); ceph_assert(m_state == STATE_READY); CephContext *cct = m_image_ctx.cct; @@ -1188,7 +1186,7 @@ void Journal::complete_event(typename Events::iterator it, int r) { template void Journal::start_append() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); m_journaler->start_append( m_image_ctx.config.template get_val("rbd_journal_object_max_in_flight_appends")); @@ -1207,7 +1205,7 @@ void Journal::handle_open(int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl; - Mutex::Locker locker(m_lock); + 
std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_INITIALIZING); if (r < 0) { @@ -1234,7 +1232,7 @@ void Journal::handle_replay_ready() { CephContext *cct = m_image_ctx.cct; ReplayEntry replay_entry; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (m_state != STATE_REPLAYING) { return; } @@ -1273,7 +1271,7 @@ void Journal::handle_replay_complete(int r) { bool cancel_ops = false; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (m_state != STATE_REPLAYING) { return; } @@ -1294,7 +1292,7 @@ void Journal::handle_replay_complete(int r) { State state; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_FLUSHING_RESTART || m_state == STATE_FLUSHING_REPLAY); state = m_state; @@ -1326,7 +1324,7 @@ void Journal::handle_replay_process_ready(int r) { ceph_assert(r == 0); { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_processing_entry); m_processing_entry = false; } @@ -1337,7 +1335,7 @@ template void Journal::handle_replay_process_safe(ReplayEntry replay_entry, int r) { CephContext *cct = m_image_ctx.cct; - m_lock.Lock(); + std::unique_lock locker{m_lock}; ceph_assert(m_state == STATE_REPLAYING || m_state == STATE_FLUSHING_RESTART || m_state == STATE_FLUSHING_REPLAY); @@ -1353,7 +1351,7 @@ void Journal::handle_replay_process_safe(ReplayEntry replay_entry, int r) { if (m_state == STATE_REPLAYING) { // abort the replay if we have an error transition_state(STATE_FLUSHING_RESTART, r); - m_lock.Unlock(); + locker.unlock(); // stop replay, shut down, and restart Context* ctx = create_context_callback< @@ -1366,7 +1364,7 @@ void Journal::handle_replay_process_safe(ReplayEntry replay_entry, int r) { ldout(cct, 20) << this << " handle_replay_process_safe: " << "shut down replay" << dendl; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_FLUSHING_RESTART); } @@ -1377,19 +1375,17 @@ void 
Journal::handle_replay_process_safe(ReplayEntry replay_entry, int r) { } else if (m_state == STATE_FLUSHING_REPLAY) { // end-of-replay flush in-progress -- we need to restart replay transition_state(STATE_FLUSHING_RESTART, r); - m_lock.Unlock(); return; } } else { // only commit the entry if written successfully m_journaler->committed(replay_entry); } - m_lock.Unlock(); } template void Journal::handle_flushing_restart(int r) { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; @@ -1406,7 +1402,7 @@ void Journal::handle_flushing_restart(int r) { template void Journal::handle_flushing_replay() { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; @@ -1434,7 +1430,7 @@ void Journal::handle_recording_stopped(int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_STOPPING); destroy_journaler(r); @@ -1451,7 +1447,7 @@ void Journal::handle_journal_destroyed(int r) { << dendl; } - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; delete m_journaler; m_journaler = nullptr; @@ -1479,7 +1475,7 @@ void Journal::handle_io_event_safe(int r, uint64_t tid) { Contexts on_safe_contexts; { - Mutex::Locker event_locker(m_event_lock); + std::lock_guard event_locker{m_event_lock}; typename Events::iterator it = m_events.find(tid); ceph_assert(it != m_events.end()); @@ -1537,7 +1533,7 @@ void Journal::handle_op_event_safe(int r, uint64_t tid, template void Journal::stop_recording() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(m_journaler != NULL); ceph_assert(m_state == STATE_READY); @@ -1552,7 +1548,7 @@ template void Journal::transition_state(State state, int r) { 
CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << ": new state=" << state << dendl; - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); m_state = state; if (m_error_result == 0 && r < 0) { @@ -1569,7 +1565,7 @@ void Journal::transition_state(State state, int r) { template bool Journal::is_steady_state() const { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); switch (m_state) { case STATE_READY: case STATE_CLOSED: @@ -1589,7 +1585,7 @@ bool Journal::is_steady_state() const { template void Journal::wait_for_steady_state(Context *on_state) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(!is_steady_state()); CephContext *cct = m_image_ctx.cct; @@ -1600,7 +1596,7 @@ void Journal::wait_for_steady_state(Context *on_state) { template int Journal::is_resync_requested(bool *do_resync) { - Mutex::Locker l(m_lock); + std::lock_guard l{m_lock}; return check_resync_requested(do_resync); } @@ -1609,7 +1605,7 @@ int Journal::check_resync_requested(bool *do_resync) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(do_resync != nullptr); cls::journal::Client client; @@ -1647,13 +1643,13 @@ struct C_RefreshTags : public Context { util::AsyncOpTracker &async_op_tracker; Context *on_finish = nullptr; - Mutex lock; + ceph::mutex lock = + ceph::make_mutex("librbd::Journal::C_RefreshTags::lock"); uint64_t tag_tid = 0; journal::TagData tag_data; explicit C_RefreshTags(util::AsyncOpTracker &async_op_tracker) - : async_op_tracker(async_op_tracker), - lock("librbd::Journal::C_RefreshTags::lock") { + : async_op_tracker(async_op_tracker) { async_op_tracker.start_op(); } ~C_RefreshTags() override { @@ -1668,7 +1664,7 @@ struct C_RefreshTags : public Context { template void Journal::handle_metadata_updated() { CephContext *cct = 
m_image_ctx.cct; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (m_state != STATE_READY && !is_journal_replaying(m_lock)) { return; @@ -1704,7 +1700,7 @@ void Journal::handle_refresh_metadata(uint64_t refresh_sequence, uint64_t tag_tid, journal::TagData tag_data, int r) { CephContext *cct = m_image_ctx.cct; - Mutex::Locker locker(m_lock); + std::unique_lock locker{m_lock}; if (r < 0) { lderr(cct) << this << " " << __func__ << ": failed to refresh metadata: " @@ -1721,9 +1717,7 @@ void Journal::handle_refresh_metadata(uint64_t refresh_sequence, << "refresh_sequence=" << refresh_sequence << ", " << "tag_tid=" << tag_tid << ", " << "tag_data=" << tag_data << dendl; - while (m_listener_notify) { - m_listener_cond.Wait(m_lock); - } + m_listener_cond.wait(locker, [this] { return !m_listener_notify; }); bool was_tag_owner = is_tag_owner(m_lock); if (m_tag_tid < tag_tid) { @@ -1742,7 +1736,7 @@ void Journal::handle_refresh_metadata(uint64_t refresh_sequence, Listeners listeners(m_listeners); m_listener_notify = true; - m_lock.Unlock(); + m_lock.unlock(); if (promoted_to_primary) { for (auto listener : listeners) { @@ -1754,23 +1748,21 @@ void Journal::handle_refresh_metadata(uint64_t refresh_sequence, } } - m_lock.Lock(); + m_lock.lock(); m_listener_notify = false; - m_listener_cond.Signal(); + m_listener_cond.notify_all(); } template void Journal::add_listener(journal::Listener *listener) { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; m_listeners.insert(listener); } template void Journal::remove_listener(journal::Listener *listener) { - Mutex::Locker locker(m_lock); - while (m_listener_notify) { - m_listener_cond.Wait(m_lock); - } + std::unique_lock locker{m_lock}; + m_listener_cond.wait(locker, [this] { return !m_listener_notify; }); m_listeners.erase(listener); } diff --git a/src/librbd/Journal.h b/src/librbd/Journal.h index e63cc4a71377..13be62aa1635 100644 --- a/src/librbd/Journal.h +++ b/src/librbd/Journal.h @@ -9,8 +9,6 @@ 
#include "include/interval_set.h" #include "include/rados/librados_fwd.hpp" #include "common/Cond.h" -#include "common/Mutex.h" -#include "common/Cond.h" #include "common/WorkQueue.h" #include "journal/Future.h" #include "journal/JournalMetadataListener.h" @@ -272,10 +270,10 @@ private: ContextWQ *m_work_queue = nullptr; SafeTimer *m_timer = nullptr; - Mutex *m_timer_lock = nullptr; + ceph::mutex *m_timer_lock = nullptr; Journaler *m_journaler; - mutable Mutex m_lock; + mutable ceph::mutex m_lock = ceph::make_mutex("Journal::m_lock"); State m_state; uint64_t m_max_append_size = 0; uint64_t m_tag_class = 0; @@ -289,7 +287,7 @@ private: ReplayHandler m_replay_handler; bool m_close_pending; - Mutex m_event_lock; + ceph::mutex m_event_lock = ceph::make_mutex("Journal::m_event_lock"); uint64_t m_event_tid; Events m_events; @@ -320,19 +318,19 @@ private: typedef std::set Listeners; Listeners m_listeners; - Cond m_listener_cond; + ceph::condition_variable m_listener_cond; bool m_listener_notify = false; uint64_t m_refresh_sequence = 0; - bool is_journal_replaying(const Mutex &) const; - bool is_tag_owner(const Mutex &) const; + bool is_journal_replaying(const ceph::mutex &) const; + bool is_tag_owner(const ceph::mutex &) const; uint64_t append_io_events(journal::EventType event_type, const Bufferlists &bufferlists, uint64_t offset, size_t length, bool flush_entry, int filter_ret_val); - Future wait_event(Mutex &lock, uint64_t tid, Context *on_safe); + Future wait_event(ceph::mutex &lock, uint64_t tid, Context *on_safe); void create_journaler(); void destroy_journaler(int r); diff --git a/src/librbd/ManagedLock.cc b/src/librbd/ManagedLock.cc index 15d2016f26a3..28867a98733e 100644 --- a/src/librbd/ManagedLock.cc +++ b/src/librbd/ManagedLock.cc @@ -67,7 +67,7 @@ ManagedLock::ManagedLock(librados::IoCtx &ioctx, ContextWQ *work_queue, const string& oid, Watcher *watcher, Mode mode, bool blacklist_on_break_lock, uint32_t blacklist_expire_seconds) - : 
m_lock(unique_lock_name("librbd::ManagedLock::m_lock", this)), + : m_lock(ceph::make_mutex(unique_lock_name("librbd::ManagedLock::m_lock", this))), m_ioctx(ioctx), m_cct(reinterpret_cast(ioctx.cct())), m_work_queue(work_queue), m_oid(oid), @@ -80,7 +80,7 @@ ManagedLock::ManagedLock(librados::IoCtx &ioctx, ContextWQ *work_queue, template ManagedLock::~ManagedLock() { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED || m_state == STATE_UNINITIALIZED); if (m_state == STATE_UNINITIALIZED) { @@ -95,15 +95,15 @@ ManagedLock::~ManagedLock() { template bool ManagedLock::is_lock_owner() const { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; return is_lock_owner(m_lock); } template -bool ManagedLock::is_lock_owner(Mutex &lock) const { +bool ManagedLock::is_lock_owner(ceph::mutex &lock) const { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); bool lock_owner; @@ -128,7 +128,7 @@ template void ManagedLock::shut_down(Context *on_shut_down) { ldout(m_cct, 10) << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(!is_state_shutdown()); if (m_state == STATE_WAITING_FOR_REGISTER) { @@ -147,7 +147,7 @@ template void ManagedLock::acquire_lock(Context *on_acquired) { int r = 0; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (is_state_shutdown()) { r = -ESHUTDOWN; } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) { @@ -166,7 +166,7 @@ template void ManagedLock::try_acquire_lock(Context *on_acquired) { int r = 0; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (is_state_shutdown()) { r = -ESHUTDOWN; } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) { @@ -185,7 +185,7 @@ template void ManagedLock::release_lock(Context *on_released) { int r = 0; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (is_state_shutdown()) { r = 
-ESHUTDOWN; } else if (m_state != STATE_UNLOCKED || !m_actions_contexts.empty()) { @@ -203,7 +203,7 @@ void ManagedLock::release_lock(Context *on_released) { template void ManagedLock::reacquire_lock(Context *on_reacquired) { { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (m_state == STATE_WAITING_FOR_REGISTER) { // restart the acquire lock process now that watch is valid @@ -237,7 +237,7 @@ void ManagedLock::get_locker(managed_lock::Locker *locker, int r; { - Mutex::Locker l(m_lock); + std::lock_guard l{m_lock}; if (is_state_shutdown()) { r = -ESHUTDOWN; } else { @@ -259,7 +259,7 @@ void ManagedLock::break_lock(const managed_lock::Locker &locker, int r; { - Mutex::Locker l(m_lock); + std::lock_guard l{m_lock}; if (is_state_shutdown()) { r = -ESHUTDOWN; } else if (is_lock_owner(m_lock)) { @@ -284,7 +284,7 @@ int ManagedLock::assert_header_locked() { librados::ObjectReadOperation op; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME, (m_mode == EXCLUSIVE ? 
LOCK_EXCLUSIVE : LOCK_SHARED), @@ -371,7 +371,7 @@ bool ManagedLock::is_transition_state() const { template void ManagedLock::append_context(Action action, Context *ctx) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); for (auto &action_ctxs : m_actions_contexts) { if (action == action_ctxs.first) { @@ -391,7 +391,7 @@ void ManagedLock::append_context(Action action, Context *ctx) { template void ManagedLock::execute_action(Action action, Context *ctx) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); append_context(action, ctx); if (!is_transition_state()) { @@ -401,7 +401,7 @@ void ManagedLock::execute_action(Action action, Context *ctx) { template void ManagedLock::execute_next_action() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(!m_actions_contexts.empty()); switch (get_active_action()) { case ACTION_ACQUIRE_LOCK: @@ -425,25 +425,25 @@ void ManagedLock::execute_next_action() { template typename ManagedLock::Action ManagedLock::get_active_action() const { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(!m_actions_contexts.empty()); return m_actions_contexts.front().first; } template void ManagedLock::complete_active_action(State next_state, int r) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(!m_actions_contexts.empty()); ActionContexts action_contexts(std::move(m_actions_contexts.front())); m_actions_contexts.pop_front(); m_state = next_state; - m_lock.Unlock(); + m_lock.unlock(); for (auto ctx : action_contexts.second) { ctx->complete(r); } - m_lock.Lock(); + m_lock.lock(); if (!is_transition_state() && !m_actions_contexts.empty()) { execute_next_action(); @@ -452,7 +452,7 @@ void ManagedLock::complete_active_action(State next_state, int r) { template bool ManagedLock::is_state_shutdown() const { - ceph_assert(m_lock.is_locked()); + 
ceph_assert(ceph_mutex_is_locked(m_lock)); switch (m_state) { case STATE_PRE_SHUTTING_DOWN: @@ -469,7 +469,7 @@ bool ManagedLock::is_state_shutdown() const { template void ManagedLock::send_acquire_lock() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); if (m_state == STATE_LOCKED) { complete_active_action(STATE_LOCKED, 0); return; @@ -541,7 +541,7 @@ template void ManagedLock::handle_post_acquire_lock(int r) { ldout(m_cct, 10) << "r=" << r << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (r < 0 && m_post_next_state == STATE_LOCKED) { // release_lock without calling pre and post handlers @@ -560,7 +560,7 @@ void ManagedLock::revert_to_unlock_state(int r) { ReleaseRequest* req = ReleaseRequest::create(m_ioctx, m_watcher, m_work_queue, m_oid, m_cookie, new FunctionContext([this, r](int ret) { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(ret == 0); complete_active_action(STATE_UNLOCKED, r); })); @@ -569,7 +569,7 @@ void ManagedLock::revert_to_unlock_state(int r) { template void ManagedLock::send_reacquire_lock() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); if (m_state != STATE_LOCKED) { complete_active_action(m_state, 0); @@ -618,7 +618,7 @@ template void ManagedLock::handle_reacquire_lock(int r) { ldout(m_cct, 10) << "r=" << r << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_REACQUIRING); if (r < 0) { @@ -647,7 +647,7 @@ void ManagedLock::handle_no_op_reacquire_lock(int r) { template void ManagedLock::release_acquire_lock() { - assert(m_lock.is_locked()); + assert(ceph_mutex_is_locked(m_lock)); if (!is_state_shutdown()) { // queue a release and re-acquire of the lock since cookie cannot @@ -672,7 +672,7 @@ void ManagedLock::release_acquire_lock() { template void ManagedLock::send_release_lock() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); if 
(m_state == STATE_UNLOCKED) { complete_active_action(STATE_UNLOCKED, 0); return; @@ -692,7 +692,7 @@ void ManagedLock::handle_pre_release_lock(int r) { ldout(m_cct, 10) << "r=" << r << dendl; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_PRE_RELEASING); m_state = STATE_RELEASING; } @@ -714,7 +714,7 @@ template void ManagedLock::handle_release_lock(int r) { ldout(m_cct, 10) << "r=" << r << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_RELEASING); if (r >= 0 || r == -EBLACKLISTED || r == -ENOENT) { @@ -734,14 +734,14 @@ template void ManagedLock::handle_post_release_lock(int r) { ldout(m_cct, 10) << "r=" << r << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; complete_active_action(m_post_next_state, r); } template void ManagedLock::send_shutdown() { ldout(m_cct, 10) << dendl; - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); if (m_state == STATE_UNLOCKED) { m_state = STATE_SHUTTING_DOWN; m_work_queue->queue(new FunctionContext([this](int r) { @@ -754,9 +754,9 @@ void ManagedLock::send_shutdown() { ceph_assert(m_state == STATE_LOCKED); m_state = STATE_PRE_SHUTTING_DOWN; - m_lock.Unlock(); + m_lock.unlock(); m_work_queue->queue(new C_ShutDownRelease(this), 0); - m_lock.Lock(); + m_lock.lock(); } template @@ -770,7 +770,7 @@ template void ManagedLock::send_shutdown_release() { ldout(m_cct, 10) << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; m_work_queue->queue(new FunctionContext([this](int r) { pre_release_lock_handler(true, create_context_callback< @@ -784,7 +784,7 @@ void ManagedLock::handle_shutdown_pre_release(int r) { std::string cookie; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; cookie = m_cookie; ceph_assert(m_state == STATE_PRE_SHUTTING_DOWN); @@ -832,8 +832,8 @@ void ManagedLock::complete_shutdown(int r) { ActionContexts action_contexts; { - 
Mutex::Locker locker(m_lock); - ceph_assert(m_lock.is_locked()); + std::lock_guard locker{m_lock}; + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(m_actions_contexts.size() == 1); action_contexts = std::move(m_actions_contexts.front()); diff --git a/src/librbd/ManagedLock.h b/src/librbd/ManagedLock.h index b27e549dab47..3e03d00ed2da 100644 --- a/src/librbd/ManagedLock.h +++ b/src/librbd/ManagedLock.h @@ -8,7 +8,6 @@ #include "include/Context.h" #include "include/rados/librados.hpp" #include "common/AsyncOpTracker.h" -#include "common/Mutex.h" #include "cls/lock/cls_lock_types.h" #include "librbd/watcher/Types.h" #include "librbd/managed_lock/Types.h" @@ -63,67 +62,67 @@ public: int assert_header_locked(); bool is_shutdown() const { - Mutex::Locker l(m_lock); + std::lock_guard l{m_lock}; return is_state_shutdown(); } protected: - mutable Mutex m_lock; + mutable ceph::mutex m_lock; inline void set_state_uninitialized() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(m_state == STATE_UNLOCKED); m_state = STATE_UNINITIALIZED; } inline void set_state_initializing() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(m_state == STATE_UNINITIALIZED); m_state = STATE_INITIALIZING; } inline void set_state_unlocked() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(m_state == STATE_INITIALIZING || m_state == STATE_RELEASING); m_state = STATE_UNLOCKED; } inline void set_state_waiting_for_lock() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(m_state == STATE_ACQUIRING); m_state = STATE_WAITING_FOR_LOCK; } inline void set_state_post_acquiring() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(m_state == STATE_ACQUIRING); m_state = STATE_POST_ACQUIRING; } bool is_state_shutdown() const; inline bool is_state_acquiring() const { - 
ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); return m_state == STATE_ACQUIRING; } inline bool is_state_post_acquiring() const { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); return m_state == STATE_POST_ACQUIRING; } inline bool is_state_releasing() const { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); return m_state == STATE_RELEASING; } inline bool is_state_pre_releasing() const { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); return m_state == STATE_PRE_RELEASING; } inline bool is_state_locked() const { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); return m_state == STATE_LOCKED; } inline bool is_state_waiting_for_lock() const { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); return m_state == STATE_WAITING_FOR_LOCK; } inline bool is_action_acquire_lock() const { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); return get_active_action() == ACTION_ACQUIRE_LOCK; } @@ -228,7 +227,7 @@ private: ActionsContexts m_actions_contexts; AsyncOpTracker m_async_op_tracker; - bool is_lock_owner(Mutex &lock) const; + bool is_lock_owner(ceph::mutex &lock) const; bool is_transition_state() const; void append_context(Action action, Context *ctx); diff --git a/src/librbd/ObjectMap.cc b/src/librbd/ObjectMap.cc index ad86a8c7b162..b7f3f06e640a 100644 --- a/src/librbd/ObjectMap.cc +++ b/src/librbd/ObjectMap.cc @@ -35,7 +35,7 @@ namespace librbd { template ObjectMap::ObjectMap(I &image_ctx, uint64_t snap_id) : m_image_ctx(image_ctx), m_snap_id(snap_id), - m_lock(util::unique_lock_name("librbd::ObjectMap::lock", this)), + m_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ObjectMap::lock", this))), m_update_guard(new UpdateGuard(m_image_ctx.cct)) { } @@ -72,7 +72,7 @@ bool ObjectMap::is_compatible(const file_layout_t& layout, uint64_t size) { 
template uint8_t ObjectMap::operator[](uint64_t object_no) const { - RWLock::RLocker locker(m_lock); + std::shared_lock locker{m_lock}; ceph_assert(object_no < m_object_map.size()); return m_object_map[object_no]; } @@ -80,7 +80,7 @@ uint8_t ObjectMap::operator[](uint64_t object_no) const template bool ObjectMap::object_may_exist(uint64_t object_no) const { - ceph_assert(m_image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); // Fall back to default logic if object map is disabled or invalid if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, @@ -106,7 +106,7 @@ bool ObjectMap::object_may_exist(uint64_t object_no) const template bool ObjectMap::object_may_not_exist(uint64_t object_no) const { - ceph_assert(m_image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); // Fall back to default logic if object map is disabled or invalid if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, @@ -132,7 +132,7 @@ bool ObjectMap::object_may_not_exist(uint64_t object_no) const template bool ObjectMap::update_required(const ceph::BitVector<2>::Iterator& it, uint8_t new_state) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); uint8_t state = *it; if ((state == new_state) || (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) || @@ -162,20 +162,20 @@ void ObjectMap::close(Context *on_finish) { template bool ObjectMap::set_object_map(ceph::BitVector<2> &target_object_map) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); - ceph_assert(m_image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, m_image_ctx.image_lock)); - RWLock::WLocker locker(m_lock); + std::unique_lock locker{m_lock}; m_object_map = target_object_map; return true; } template void ObjectMap::rollback(uint64_t snap_id, Context 
*on_finish) { - ceph_assert(m_image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); - RWLock::WLocker locker(m_lock); + std::unique_lock locker{m_lock}; object_map::SnapshotRollbackRequest *req = new object_map::SnapshotRollbackRequest(m_image_ctx, snap_id, on_finish); req->send(); @@ -183,7 +183,7 @@ void ObjectMap::rollback(uint64_t snap_id, Context *on_finish) { template void ObjectMap::snapshot_add(uint64_t snap_id, Context *on_finish) { - ceph_assert(m_image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0); ceph_assert(snap_id != CEPH_NOSNAP); @@ -195,7 +195,7 @@ void ObjectMap::snapshot_add(uint64_t snap_id, Context *on_finish) { template void ObjectMap::snapshot_remove(uint64_t snap_id, Context *on_finish) { - ceph_assert(m_image_ctx.image_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_wlocked(m_image_ctx.image_lock)); ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0); ceph_assert(snap_id != CEPH_NOSNAP); @@ -207,11 +207,11 @@ void ObjectMap::snapshot_remove(uint64_t snap_id, Context *on_finish) { template void ObjectMap::aio_save(Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); - ceph_assert(m_image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, m_image_ctx.image_lock)); - RWLock::RLocker locker(m_lock); + std::shared_lock locker{m_lock}; librados::ObjectWriteOperation op; if (m_snap_id == CEPH_NOSNAP) { @@ -230,8 +230,8 @@ void ObjectMap::aio_save(Context *on_finish) { template void ObjectMap::aio_resize(uint64_t new_size, uint8_t default_object_state, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); - ceph_assert(m_image_ctx.image_lock.is_locked()); + 
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, m_image_ctx.image_lock)); ceph_assert(m_image_ctx.image_watcher != NULL); @@ -249,8 +249,8 @@ void ObjectMap::detained_aio_update(UpdateOperation &&op) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << dendl; - ceph_assert(m_image_ctx.image_lock.is_locked()); - ceph_assert(m_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); + ceph_assert(ceph_mutex_is_wlocked(m_lock)); BlockGuardCell *cell; int r = m_update_guard->detain({op.start_object_no, op.end_object_no}, @@ -290,8 +290,8 @@ void ObjectMap::handle_detained_aio_update(BlockGuardCell *cell, int r, m_update_guard->release(cell, &block_ops); { - RWLock::RLocker image_locker(m_image_ctx.image_lock); - RWLock::WLocker locker(m_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; + std::unique_lock locker{m_lock}; for (auto &op : block_ops) { detained_aio_update(std::move(op)); } @@ -306,7 +306,7 @@ void ObjectMap::aio_update(uint64_t snap_id, uint64_t start_object_no, const boost::optional ¤t_state, const ZTracer::Trace &parent_trace, bool ignore_enoent, Context *on_finish) { - ceph_assert(m_image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0); ceph_assert(m_image_ctx.image_watcher != nullptr); ceph_assert(m_image_ctx.exclusive_lock == nullptr || @@ -320,7 +320,7 @@ void ObjectMap::aio_update(uint64_t snap_id, uint64_t start_object_no, stringify(static_cast(*current_state)) : "") << "->" << static_cast(new_state) << dendl; if (snap_id == CEPH_NOSNAP) { - ceph_assert(m_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_wlocked(m_lock)); end_object_no = std::min(end_object_no, m_object_map.size()); if (start_object_no >= end_object_no) { ldout(cct, 20) << "skipping update of invalid 
object map" << dendl; diff --git a/src/librbd/ObjectMap.h b/src/librbd/ObjectMap.h index 3f7cdd04e015..0d4264887bc2 100644 --- a/src/librbd/ObjectMap.h +++ b/src/librbd/ObjectMap.h @@ -40,13 +40,13 @@ public: uint8_t operator[](uint64_t object_no) const; inline uint64_t size() const { - RWLock::RLocker locker(m_lock); + std::shared_lock locker{m_lock}; return m_object_map.size(); } inline void set_state(uint64_t object_no, uint8_t new_state, const boost::optional ¤t_state) { - RWLock::WLocker locker(m_lock); + std::unique_lock locker{m_lock}; ceph_assert(object_no < m_object_map.size()); if (current_state && m_object_map[object_no] != *current_state) { return; @@ -81,7 +81,7 @@ public: const ZTracer::Trace &parent_trace, bool ignore_enoent, T *callback_object) { ceph_assert(start_object_no < end_object_no); - RWLock::WLocker locker(m_lock); + std::unique_lock locker{m_lock}; if (snap_id == CEPH_NOSNAP) { end_object_no = std::min(end_object_no, m_object_map.size()); @@ -146,7 +146,7 @@ private: ImageCtxT &m_image_ctx; uint64_t m_snap_id; - RWLock m_lock; + mutable ceph::shared_mutex m_lock; ceph::BitVector<2> m_object_map; UpdateGuard *m_update_guard = nullptr; diff --git a/src/librbd/Operations.cc b/src/librbd/Operations.cc index eaee012350e1..cfb9b4467f5f 100644 --- a/src/librbd/Operations.cc +++ b/src/librbd/Operations.cc @@ -176,24 +176,24 @@ struct C_InvokeAsyncRequest : public Context { void send_acquire_exclusive_lock() { // context can complete before owner_lock is unlocked - RWLock &owner_lock(image_ctx.owner_lock); - owner_lock.get_read(); - image_ctx.image_lock.get_read(); + ceph::shared_mutex &owner_lock(image_ctx.owner_lock); + owner_lock.lock_shared(); + image_ctx.image_lock.lock_shared(); if (image_ctx.read_only || (!permit_snapshot && image_ctx.snap_id != CEPH_NOSNAP)) { - image_ctx.image_lock.put_read(); - owner_lock.put_read(); + image_ctx.image_lock.unlock_shared(); + owner_lock.unlock_shared(); complete(-EROFS); return; } - 
image_ctx.image_lock.put_read(); + image_ctx.image_lock.unlock_shared(); if (image_ctx.exclusive_lock == nullptr) { send_local_request(); - owner_lock.put_read(); + owner_lock.unlock_shared(); return; } else if (image_ctx.image_watcher == nullptr) { - owner_lock.put_read(); + owner_lock.unlock_shared(); complete(-EROFS); return; } @@ -201,7 +201,7 @@ struct C_InvokeAsyncRequest : public Context { if (image_ctx.exclusive_lock->is_lock_owner() && image_ctx.exclusive_lock->accept_requests()) { send_local_request(); - owner_lock.put_read(); + owner_lock.unlock_shared(); return; } @@ -221,7 +221,7 @@ struct C_InvokeAsyncRequest : public Context { } else { image_ctx.exclusive_lock->try_acquire_lock(ctx); } - owner_lock.put_read(); + owner_lock.unlock_shared(); } void handle_acquire_exclusive_lock(int r) { @@ -234,21 +234,21 @@ struct C_InvokeAsyncRequest : public Context { } // context can complete before owner_lock is unlocked - RWLock &owner_lock(image_ctx.owner_lock); - owner_lock.get_read(); + ceph::shared_mutex &owner_lock(image_ctx.owner_lock); + owner_lock.lock_shared(); if (image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()) { send_local_request(); - owner_lock.put_read(); + owner_lock.unlock_shared(); return; } send_remote_request(); - owner_lock.put_read(); + owner_lock.unlock_shared(); } void send_remote_request() { - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 20) << __func__ << dendl; @@ -283,7 +283,7 @@ struct C_InvokeAsyncRequest : public Context { } void send_local_request() { - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 20) << __func__ << dendl; @@ -348,7 +348,7 @@ int Operations::flatten(ProgressContext &prog_ctx) { } { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock 
image_locker{m_image_ctx.image_lock}; if (m_image_ctx.parent_md.spec.pool_id == -1) { lderr(cct) << "image has no parent" << dendl; return -EINVAL; @@ -373,7 +373,7 @@ int Operations::flatten(ProgressContext &prog_ctx) { template void Operations::execute_flatten(ProgressContext &prog_ctx, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); @@ -385,18 +385,18 @@ void Operations::execute_flatten(ProgressContext &prog_ctx, return; } - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); // can't flatten a non-clone if (m_image_ctx.parent_md.spec.pool_id == -1) { lderr(cct) << "image has no parent" << dendl; - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-EINVAL); return; } if (m_image_ctx.snap_id != CEPH_NOSNAP) { lderr(cct) << "snapshots cannot be flattened" << dendl; - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-EROFS); return; } @@ -411,7 +411,7 @@ void Operations::execute_flatten(ProgressContext &prog_ctx, uint64_t overlap_objects = Striper::get_num_objects(m_image_ctx.layout, overlap); - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); operation::FlattenRequest *req = new operation::FlattenRequest( m_image_ctx, new C_NotifyUpdate(m_image_ctx, on_finish), overlap_objects, @@ -447,7 +447,7 @@ int Operations::rebuild_object_map(ProgressContext &prog_ctx) { template void Operations::execute_rebuild_object_map(ProgressContext &prog_ctx, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); @@ -494,7 +494,7 @@ template void 
Operations::object_map_iterate(ProgressContext &prog_ctx, operation::ObjectIterateWork handle_mismatch, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); @@ -545,7 +545,7 @@ int Operations::rename(const char *dstname) { } else { C_SaferCond cond_ctx; { - RWLock::RLocker owner_lock(m_image_ctx.owner_lock); + std::shared_lock owner_lock{m_image_ctx.owner_lock}; execute_rename(dstname, &cond_ctx); } @@ -562,7 +562,7 @@ int Operations::rename(const char *dstname) { template void Operations::execute_rename(const std::string &dest_name, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) { ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); @@ -573,13 +573,13 @@ void Operations::execute_rename(const std::string &dest_name, return; } - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); if (m_image_ctx.name == dest_name) { - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-EEXIST); return; } - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": dest_name=" << dest_name @@ -595,7 +595,7 @@ void Operations::execute_rename(const std::string &dest_name, m_image_ctx.image_watcher->register_watch(on_finish); }); on_finish = new FunctionContext([this, dest_name, on_finish](int r) { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; operation::RenameRequest *req = new operation::RenameRequest( m_image_ctx, on_finish, dest_name); req->send(); @@ -612,11 +612,11 @@ template int 
Operations::resize(uint64_t size, bool allow_shrink, ProgressContext& prog_ctx) { CephContext *cct = m_image_ctx.cct; - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); ldout(cct, 5) << this << " " << __func__ << ": " << "size=" << m_image_ctx.size << ", " << "new_size=" << size << dendl; - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); int r = m_image_ctx.state->refresh_if_required(); if (r < 0) { @@ -646,29 +646,29 @@ template void Operations::execute_resize(uint64_t size, bool allow_shrink, ProgressContext &prog_ctx, Context *on_finish, uint64_t journal_op_tid) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); CephContext *cct = m_image_ctx.cct; - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); ldout(cct, 5) << this << " " << __func__ << ": " << "size=" << m_image_ctx.size << ", " << "new_size=" << size << dendl; if (m_image_ctx.snap_id != CEPH_NOSNAP || m_image_ctx.read_only || m_image_ctx.operations_disabled) { - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-EROFS); return; } else if (m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP, m_image_ctx.image_lock) && !ObjectMap<>::is_compatible(m_image_ctx.layout, size)) { - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-EINVAL); return; } - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); operation::ResizeRequest *req = new operation::ResizeRequest( m_image_ctx, new C_NotifyUpdate(m_image_ctx, on_finish), size, allow_shrink, @@ -713,13 +713,13 @@ void Operations::snap_create(const cls::rbd::SnapshotNamespace &snap_namespac return; } - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); if 
(m_image_ctx.get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP) { - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-EEXIST); return; } - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); C_InvokeAsyncRequest *req = new C_InvokeAsyncRequest( m_image_ctx, "snap_create", true, @@ -737,7 +737,7 @@ void Operations::execute_snap_create(const cls::rbd::SnapshotNamespace &snap_ Context *on_finish, uint64_t journal_op_tid, bool skip_object_map) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); @@ -750,13 +750,13 @@ void Operations::execute_snap_create(const cls::rbd::SnapshotNamespace &snap_ return; } - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); if (m_image_ctx.get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP) { - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-EEXIST); return; } - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); operation::SnapshotCreateRequest *req = new operation::SnapshotCreateRequest( @@ -779,10 +779,10 @@ int Operations::snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespa C_SaferCond cond_ctx; { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; { // need to drop image_lock before invalidating cache - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; if (!m_image_ctx.snap_exists) { return -ENOENT; } @@ -820,7 +820,7 @@ void Operations::execute_snap_rollback(const cls::rbd::SnapshotNamespace& sna const std::string &snap_name, ProgressContext& prog_ctx, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + 
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name << dendl; @@ -830,17 +830,17 @@ void Operations::execute_snap_rollback(const cls::rbd::SnapshotNamespace& sna return; } - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name); if (snap_id == CEPH_NOSNAP) { lderr(cct) << "No such snapshot found." << dendl; - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-ENOENT); return; } uint64_t new_size = m_image_ctx.get_image_size(snap_id); - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); // async mode used for journal replay operation::SnapshotRollbackRequest *request = @@ -888,16 +888,16 @@ void Operations::snap_remove(const cls::rbd::SnapshotNamespace& snap_namespac } // quickly filter out duplicate ops - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); if (m_image_ctx.get_snap_id(snap_namespace, snap_name) == CEPH_NOSNAP) { - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-ENOENT); return; } bool proxy_op = ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0 || (m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0); - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); if (proxy_op) { C_InvokeAsyncRequest *req = new C_InvokeAsyncRequest( @@ -908,7 +908,7 @@ void Operations::snap_remove(const cls::rbd::SnapshotNamespace& snap_namespac {-ENOENT}, on_finish); req->send(); } else { - RWLock::RLocker owner_lock(m_image_ctx.owner_lock); + std::shared_lock owner_lock{m_image_ctx.owner_lock}; execute_snap_remove(snap_namespace, snap_name, on_finish); } } @@ -917,7 +917,7 @@ template void Operations::execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace, const std::string &snap_name, 
Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); { if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) { ceph_assert(m_image_ctx.exclusive_lock == nullptr || @@ -934,11 +934,11 @@ void Operations::execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_ return; } - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name); if (snap_id == CEPH_NOSNAP) { lderr(m_image_ctx.cct) << "No such snapshot found." << dendl; - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-ENOENT); return; } @@ -946,16 +946,16 @@ void Operations::execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_ bool is_protected; int r = m_image_ctx.is_snap_protected(snap_id, &is_protected); if (r < 0) { - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(r); return; } else if (is_protected) { lderr(m_image_ctx.cct) << "snapshot is protected" << dendl; - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-EBUSY); return; } - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); operation::SnapshotRemoveRequest *req = new operation::SnapshotRemoveRequest( @@ -981,7 +981,7 @@ int Operations::snap_rename(const char *srcname, const char *dstname) { return r; { - RWLock::RLocker l(m_image_ctx.image_lock); + std::shared_lock l{m_image_ctx.image_lock}; snap_id = m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(), srcname); if (snap_id == CEPH_NOSNAP) { return -ENOENT; @@ -1004,7 +1004,7 @@ int Operations::snap_rename(const char *srcname, const char *dstname) { } else { C_SaferCond cond_ctx; { - RWLock::RLocker owner_lock(m_image_ctx.owner_lock); + std::shared_lock owner_lock{m_image_ctx.owner_lock}; execute_snap_rename(snap_id, dstname, &cond_ctx); } @@ 
-1022,7 +1022,7 @@ template void Operations::execute_snap_rename(const uint64_t src_snap_id, const std::string &dest_snap_name, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); if ((m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) { ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); @@ -1033,15 +1033,15 @@ void Operations::execute_snap_rename(const uint64_t src_snap_id, return; } - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); if (m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(), dest_snap_name) != CEPH_NOSNAP) { // Renaming is supported for snapshots from user namespace only. - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-EEXIST); return; } - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": " @@ -1077,7 +1077,7 @@ int Operations::snap_protect(const cls::rbd::SnapshotNamespace& snap_namespac } { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; bool is_protected; r = m_image_ctx.is_snap_protected(m_image_ctx.get_snap_id(snap_namespace, snap_name), &is_protected); @@ -1103,7 +1103,7 @@ int Operations::snap_protect(const cls::rbd::SnapshotNamespace& snap_namespac } else { C_SaferCond cond_ctx; { - RWLock::RLocker owner_lock(m_image_ctx.owner_lock); + std::shared_lock owner_lock{m_image_ctx.owner_lock}; execute_snap_protect(snap_namespace, snap_name, &cond_ctx); } @@ -1119,7 +1119,7 @@ template void Operations::execute_snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace, const std::string &snap_name, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); if 
(m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) { ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); @@ -1130,20 +1130,20 @@ void Operations::execute_snap_protect(const cls::rbd::SnapshotNamespace& snap return; } - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); bool is_protected; int r = m_image_ctx.is_snap_protected(m_image_ctx.get_snap_id(snap_namespace, snap_name), &is_protected); if (r < 0) { - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(r); return; } else if (is_protected) { - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-EBUSY); return; } - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name @@ -1172,7 +1172,7 @@ int Operations::snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namesp } { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; bool is_unprotected; r = m_image_ctx.is_snap_unprotected(m_image_ctx.get_snap_id(snap_namespace, snap_name), &is_unprotected); @@ -1198,7 +1198,7 @@ int Operations::snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namesp } else { C_SaferCond cond_ctx; { - RWLock::RLocker owner_lock(m_image_ctx.owner_lock); + std::shared_lock owner_lock{m_image_ctx.owner_lock}; execute_snap_unprotect(snap_namespace, snap_name, &cond_ctx); } @@ -1214,7 +1214,7 @@ template void Operations::execute_snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace, const std::string &snap_name, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) { ceph_assert(m_image_ctx.exclusive_lock == nullptr || 
m_image_ctx.exclusive_lock->is_lock_owner()); @@ -1225,20 +1225,20 @@ void Operations::execute_snap_unprotect(const cls::rbd::SnapshotNamespace& sn return; } - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); bool is_unprotected; int r = m_image_ctx.is_snap_unprotected(m_image_ctx.get_snap_id(snap_namespace, snap_name), &is_unprotected); if (r < 0) { - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(r); return; } else if (is_unprotected) { - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-EINVAL); return; } - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name @@ -1266,7 +1266,7 @@ int Operations::snap_set_limit(uint64_t limit) { C_SaferCond limit_ctx; { - RWLock::RLocker owner_lock(m_image_ctx.owner_lock); + std::shared_lock owner_lock{m_image_ctx.owner_lock}; r = prepare_image_update(true); if (r < 0) { return r; @@ -1282,7 +1282,7 @@ int Operations::snap_set_limit(uint64_t limit) { template void Operations::execute_snap_set_limit(const uint64_t limit, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": limit=" << limit @@ -1334,7 +1334,7 @@ int Operations::update_features(uint64_t features, bool enabled) { return -EINVAL; } { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; if (enabled && (features & m_image_ctx.features) != 0) { lderr(cct) << "one or more requested features are already enabled" << dendl; @@ -1351,13 +1351,13 @@ int Operations::update_features(uint64_t features, bool enabled) { // when acquiring the exclusive lock in case the journal is corrupt bool disabling_journal 
= false; if (!enabled && ((features & RBD_FEATURE_JOURNALING) != 0)) { - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::unique_lock image_locker{m_image_ctx.image_lock}; m_image_ctx.set_journal_policy(new journal::DisabledPolicy()); disabling_journal = true; } BOOST_SCOPE_EXIT_ALL( (this)(disabling_journal) ) { if (disabling_journal) { - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::unique_lock image_locker{m_image_ctx.image_lock}; m_image_ctx.set_journal_policy( new journal::StandardPolicy(&m_image_ctx)); } @@ -1370,7 +1370,7 @@ int Operations::update_features(uint64_t features, bool enabled) { if (enabled && (features & RBD_FEATURE_JOURNALING) != 0) { C_SaferCond cond_ctx; { - RWLock::RLocker owner_lock(m_image_ctx.owner_lock); + std::shared_lock owner_lock{m_image_ctx.owner_lock}; r = prepare_image_update(true); if (r < 0) { return r; @@ -1396,7 +1396,7 @@ template void Operations::execute_update_features(uint64_t features, bool enabled, Context *on_finish, uint64_t journal_op_tid) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); @@ -1455,7 +1455,7 @@ int Operations::metadata_set(const std::string &key, C_SaferCond metadata_ctx; { - RWLock::RLocker owner_lock(m_image_ctx.owner_lock); + std::shared_lock owner_lock{m_image_ctx.owner_lock}; r = prepare_image_update(true); if (r < 0) { return r; @@ -1477,7 +1477,7 @@ template void Operations::execute_metadata_set(const std::string &key, const std::string &value, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": key=" << key << ", value=" @@ -1516,7 +1516,7 @@ int Operations::metadata_remove(const std::string &key) { C_SaferCond metadata_ctx; { - RWLock::RLocker 
owner_lock(m_image_ctx.owner_lock); + std::shared_lock owner_lock{m_image_ctx.owner_lock}; r = prepare_image_update(true); if (r < 0) { return r; @@ -1539,7 +1539,7 @@ int Operations::metadata_remove(const std::string &key) { template void Operations::execute_metadata_remove(const std::string &key, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << ": key=" << key << dendl; @@ -1571,7 +1571,7 @@ int Operations::migrate(ProgressContext &prog_ctx) { } { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; if (m_image_ctx.migration_info.empty()) { lderr(cct) << "image has no migrating parent" << dendl; return -EINVAL; @@ -1596,7 +1596,7 @@ int Operations::migrate(ProgressContext &prog_ctx) { template void Operations::execute_migrate(ProgressContext &prog_ctx, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); @@ -1608,22 +1608,22 @@ void Operations::execute_migrate(ProgressContext &prog_ctx, return; } - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); if (m_image_ctx.migration_info.empty()) { lderr(cct) << "image has no migrating parent" << dendl; - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-EINVAL); return; } if (m_image_ctx.snap_id != CEPH_NOSNAP) { lderr(cct) << "snapshots cannot be migrated" << dendl; - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); on_finish->complete(-EROFS); return; } - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); operation::MigrateRequest *req = new operation::MigrateRequest( m_image_ctx, new 
C_NotifyUpdate(m_image_ctx, on_finish), prog_ctx); @@ -1662,7 +1662,7 @@ template void Operations::execute_sparsify(size_t sparse_size, ProgressContext &prog_ctx, Context *on_finish) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); @@ -1682,18 +1682,17 @@ void Operations::execute_sparsify(size_t sparse_size, template int Operations::prepare_image_update(bool request_lock) { - ceph_assert(m_image_ctx.owner_lock.is_locked() && - !m_image_ctx.owner_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_rlocked(m_image_ctx.owner_lock)); if (m_image_ctx.image_watcher == nullptr) { return -EROFS; } // need to upgrade to a write lock C_SaferCond ctx; - m_image_ctx.owner_lock.put_read(); + m_image_ctx.owner_lock.unlock_shared(); bool attempting_lock = false; { - RWLock::WLocker owner_locker(m_image_ctx.owner_lock); + std::unique_lock owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock != nullptr && (!m_image_ctx.exclusive_lock->is_lock_owner() || !m_image_ctx.exclusive_lock->accept_requests())) { @@ -1714,7 +1713,7 @@ int Operations::prepare_image_update(bool request_lock) { r = ctx.wait(); } - m_image_ctx.owner_lock.get_read(); + m_image_ctx.owner_lock.lock_shared(); if (attempting_lock && m_image_ctx.exclusive_lock != nullptr) { m_image_ctx.exclusive_lock->unblock_requests(); } diff --git a/src/librbd/TaskFinisher.h b/src/librbd/TaskFinisher.h index 410b8ee88e47..67507efd0e41 100644 --- a/src/librbd/TaskFinisher.h +++ b/src/librbd/TaskFinisher.h @@ -6,7 +6,7 @@ #include "include/Context.h" #include "common/ceph_context.h" #include "common/Finisher.h" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "common/Timer.h" #include #include @@ -16,12 +16,11 @@ class CephContext; namespace librbd { struct TaskFinisherSingleton { - Mutex m_lock; + ceph::mutex m_lock = 
ceph::make_mutex("librbd::TaskFinisher::m_lock"); SafeTimer *m_safe_timer; Finisher *m_finisher; - explicit TaskFinisherSingleton(CephContext *cct) - : m_lock("librbd::TaskFinisher::m_lock") { + explicit TaskFinisherSingleton(CephContext *cct) { m_safe_timer = new SafeTimer(cct, m_lock, false); m_safe_timer->init(); m_finisher = new Finisher(cct, "librbd::TaskFinisher::m_finisher", "taskfin_librbd"); @@ -29,7 +28,7 @@ struct TaskFinisherSingleton { } virtual ~TaskFinisherSingleton() { { - Mutex::Locker l(m_lock); + std::lock_guard l{m_lock}; m_safe_timer->shutdown(); delete m_safe_timer; } @@ -53,7 +52,7 @@ public: } void cancel(const Task& task) { - Mutex::Locker l(*m_lock); + std::lock_guard l{*m_lock}; typename TaskContexts::iterator it = m_task_contexts.find(task); if (it != m_task_contexts.end()) { delete it->second.first; @@ -64,7 +63,7 @@ public: void cancel_all(Context *comp) { { - Mutex::Locker l(*m_lock); + std::lock_guard l{*m_lock}; for (typename TaskContexts::iterator it = m_task_contexts.begin(); it != m_task_contexts.end(); ++it) { delete it->second.first; @@ -76,7 +75,7 @@ public: } bool add_event_after(const Task& task, double seconds, Context *ctx) { - Mutex::Locker l(*m_lock); + std::lock_guard l{*m_lock}; if (m_task_contexts.count(task) != 0) { // task already scheduled on finisher or timer delete ctx; @@ -94,7 +93,7 @@ public: } bool queue(const Task& task, Context *ctx) { - Mutex::Locker l(*m_lock); + std::lock_guard l{*m_lock}; typename TaskContexts::iterator it = m_task_contexts.find(task); if (it != m_task_contexts.end()) { if (it->second.second != NULL) { @@ -130,7 +129,7 @@ private: CephContext &m_cct; - Mutex *m_lock; + ceph::mutex *m_lock; Finisher *m_finisher; SafeTimer *m_safe_timer; @@ -140,7 +139,7 @@ private: void complete(const Task& task) { Context *ctx = NULL; { - Mutex::Locker l(*m_lock); + std::lock_guard l{*m_lock}; typename TaskContexts::iterator it = m_task_contexts.find(task); if (it != m_task_contexts.end()) { ctx = 
it->second.first; diff --git a/src/librbd/Watcher.cc b/src/librbd/Watcher.cc index c02598983e70..f13e02d5834f 100644 --- a/src/librbd/Watcher.cc +++ b/src/librbd/Watcher.cc @@ -91,20 +91,20 @@ Watcher::Watcher(librados::IoCtx& ioctx, ContextWQ *work_queue, const string& oid) : m_ioctx(ioctx), m_work_queue(work_queue), m_oid(oid), m_cct(reinterpret_cast(ioctx.cct())), - m_watch_lock(util::unique_lock_name("librbd::Watcher::m_watch_lock", this)), + m_watch_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::Watcher::m_watch_lock", this))), m_watch_handle(0), m_notifier(work_queue, ioctx, oid), m_watch_state(WATCH_STATE_IDLE), m_watch_ctx(*this) { } Watcher::~Watcher() { - RWLock::RLocker l(m_watch_lock); + std::shared_lock l{m_watch_lock}; ceph_assert(is_unregistered(m_watch_lock)); } void Watcher::register_watch(Context *on_finish) { ldout(m_cct, 10) << dendl; - RWLock::WLocker watch_locker(m_watch_lock); + std::unique_lock watch_locker{m_watch_lock}; ceph_assert(is_unregistered(m_watch_lock)); m_watch_state = WATCH_STATE_REGISTERING; m_watch_blacklisted = false; @@ -122,7 +122,7 @@ void Watcher::handle_register_watch(int r, Context *on_finish) { bool watch_error = false; Context *unregister_watch_ctx = nullptr; { - RWLock::WLocker watch_locker(m_watch_lock); + std::unique_lock watch_locker{m_watch_lock}; ceph_assert(m_watch_state == WATCH_STATE_REGISTERING); m_watch_state = WATCH_STATE_IDLE; @@ -156,7 +156,7 @@ void Watcher::unregister_watch(Context *on_finish) { ldout(m_cct, 10) << dendl; { - RWLock::WLocker watch_locker(m_watch_lock); + std::unique_lock watch_locker{m_watch_lock}; if (m_watch_state != WATCH_STATE_IDLE) { ldout(m_cct, 10) << "delaying unregister until register completed" << dendl; @@ -183,7 +183,7 @@ void Watcher::unregister_watch(Context *on_finish) { } bool Watcher::notifications_blocked() const { - RWLock::RLocker locker(m_watch_lock); + std::shared_lock locker{m_watch_lock}; bool blocked = (m_blocked_count > 0); ldout(m_cct, 5) << 
"blocked=" << blocked << dendl; @@ -192,7 +192,7 @@ bool Watcher::notifications_blocked() const { void Watcher::block_notifies(Context *on_finish) { { - RWLock::WLocker locker(m_watch_lock); + std::unique_lock locker{m_watch_lock}; ++m_blocked_count; ldout(m_cct, 5) << "blocked_count=" << m_blocked_count << dendl; } @@ -200,7 +200,7 @@ void Watcher::block_notifies(Context *on_finish) { } void Watcher::unblock_notifies() { - RWLock::WLocker locker(m_watch_lock); + std::unique_lock locker{m_watch_lock}; ceph_assert(m_blocked_count > 0); --m_blocked_count; ldout(m_cct, 5) << "blocked_count=" << m_blocked_count << dendl; @@ -211,12 +211,12 @@ void Watcher::flush(Context *on_finish) { } std::string Watcher::get_oid() const { - RWLock::RLocker locker(m_watch_lock); + std::shared_lock locker{m_watch_lock}; return m_oid; } void Watcher::set_oid(const string& oid) { - RWLock::WLocker watch_locker(m_watch_lock); + std::unique_lock watch_locker{m_watch_lock}; ceph_assert(is_unregistered(m_watch_lock)); m_oid = oid; @@ -225,7 +225,7 @@ void Watcher::set_oid(const string& oid) { void Watcher::handle_error(uint64_t handle, int err) { lderr(m_cct) << "handle=" << handle << ": " << cpp_strerror(err) << dendl; - RWLock::WLocker watch_locker(m_watch_lock); + std::unique_lock watch_locker{m_watch_lock}; m_watch_error = true; if (is_registered(m_watch_lock)) { @@ -250,7 +250,7 @@ void Watcher::rewatch() { Context *unregister_watch_ctx = nullptr; { - RWLock::WLocker watch_locker(m_watch_lock); + std::unique_lock watch_locker{m_watch_lock}; ceph_assert(m_watch_state == WATCH_STATE_REWATCHING); if (m_unregister_watch_ctx != nullptr) { @@ -276,7 +276,7 @@ void Watcher::handle_rewatch(int r) { bool watch_error = false; Context *unregister_watch_ctx = nullptr; { - RWLock::WLocker watch_locker(m_watch_lock); + std::unique_lock watch_locker{m_watch_lock}; ceph_assert(m_watch_state == WATCH_STATE_REWATCHING); m_watch_blacklisted = false; @@ -318,7 +318,7 @@ void 
Watcher::handle_rewatch_callback(int r) { bool watch_error = false; Context *unregister_watch_ctx = nullptr; { - RWLock::WLocker watch_locker(m_watch_lock); + std::unique_lock watch_locker{m_watch_lock}; ceph_assert(m_watch_state == WATCH_STATE_REWATCHING); if (m_unregister_watch_ctx != nullptr) { diff --git a/src/librbd/Watcher.h b/src/librbd/Watcher.h index 69e87ad30ce9..e2889605010c 100644 --- a/src/librbd/Watcher.h +++ b/src/librbd/Watcher.h @@ -5,7 +5,7 @@ #define CEPH_LIBRBD_WATCHER_H #include "common/AsyncOpTracker.h" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "common/RWLock.h" #include "include/rados/librados.hpp" #include "librbd/watcher/Notifier.h" @@ -48,20 +48,20 @@ public: void set_oid(const string& oid); uint64_t get_watch_handle() const { - RWLock::RLocker watch_locker(m_watch_lock); + std::shared_lock watch_locker{m_watch_lock}; return m_watch_handle; } bool is_registered() const { - RWLock::RLocker locker(m_watch_lock); + std::shared_lock locker{m_watch_lock}; return is_registered(m_watch_lock); } bool is_unregistered() const { - RWLock::RLocker locker(m_watch_lock); + std::shared_lock locker{m_watch_lock}; return is_unregistered(m_watch_lock); } bool is_blacklisted() const { - RWLock::RLocker locker(m_watch_lock); + std::shared_lock locker{m_watch_lock}; return m_watch_blacklisted; } @@ -76,7 +76,7 @@ protected: ContextWQ *m_work_queue; std::string m_oid; CephContext *m_cct; - mutable RWLock m_watch_lock; + mutable ceph::shared_mutex m_watch_lock; uint64_t m_watch_handle; watcher::Notifier m_notifier; @@ -85,10 +85,10 @@ protected: AsyncOpTracker m_async_op_tracker; - bool is_registered(const RWLock&) const { + bool is_registered(const ceph::shared_mutex&) const { return (m_watch_state == WATCH_STATE_IDLE && m_watch_handle != 0); } - bool is_unregistered(const RWLock&) const { + bool is_unregistered(const ceph::shared_mutex&) const { return (m_watch_state == WATCH_STATE_IDLE && m_watch_handle == 0); } diff --git 
a/src/librbd/api/DiffIterate.cc b/src/librbd/api/DiffIterate.cc index a45180b0b004..86be5b4e948a 100644 --- a/src/librbd/api/DiffIterate.cc +++ b/src/librbd/api/DiffIterate.cc @@ -13,6 +13,7 @@ #include "include/rados/librados.hpp" #include "include/interval_set.h" #include "common/errno.h" +#include "common/Cond.h" #include "common/Throttle.h" #include "osdc/Striper.h" #include "librados/snap_set_diff.h" @@ -241,7 +242,7 @@ int DiffIterate::diff_iterate(I *ictx, // ensure previous writes are visible to listsnaps C_SaferCond flush_ctx; { - RWLock::RLocker owner_locker(ictx->owner_lock); + std::shared_lock owner_locker{ictx->owner_lock}; auto aio_comp = io::AioCompletion::create_and_start(&flush_ctx, ictx, io::AIO_TYPE_FLUSH); auto req = io::ImageDispatchSpec::create_flush_request( @@ -259,9 +260,9 @@ int DiffIterate::diff_iterate(I *ictx, return r; } - ictx->image_lock.get_read(); + ictx->image_lock.lock_shared(); r = clip_io(ictx, off, &len); - ictx->image_lock.put_read(); + ictx->image_lock.unlock_shared(); if (r < 0) { return r; } @@ -282,7 +283,7 @@ int DiffIterate::execute() { uint64_t from_size = 0; uint64_t end_size; { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; head_ctx.dup(m_image_ctx.data_ctx); if (m_from_snap_name) { from_snap_id = m_image_ctx.get_snap_id(m_from_snap_namespace, m_from_snap_name); @@ -307,7 +308,7 @@ int DiffIterate::execute() { bool fast_diff_enabled = false; BitVector<2> object_diff_state; { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; if (m_whole_object && (m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) { r = diff_object_map(from_snap_id, end_snap_id, &object_diff_state); if (r < 0) { @@ -330,7 +331,7 @@ int DiffIterate::execute() { DiffContext diff_context(m_image_ctx, m_callback, m_callback_arg, m_whole_object, from_snap_id, end_snap_id); if (m_include_parent && from_snap_id == 0) { - 
RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; uint64_t overlap = 0; m_image_ctx.get_parent_overlap(m_image_ctx.snap_id, &overlap); r = 0; @@ -409,7 +410,7 @@ int DiffIterate::execute() { template int DiffIterate::diff_object_map(uint64_t from_snap_id, uint64_t to_snap_id, BitVector<2>* object_diff_state) { - ceph_assert(m_image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); CephContext* cct = m_image_ctx.cct; bool diff_from_start = (from_snap_id == 0); diff --git a/src/librbd/api/Group.cc b/src/librbd/api/Group.cc index 04f155b5189b..aa341bc25328 100644 --- a/src/librbd/api/Group.cc +++ b/src/librbd/api/Group.cc @@ -38,7 +38,7 @@ namespace { template snap_t get_group_snap_id(I* ictx, const cls::rbd::SnapshotNamespace& in_snap_namespace) { - ceph_assert(ictx->image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(ictx->image_lock)); auto it = ictx->snap_ids.lower_bound({in_snap_namespace, ""}); if (it != ictx->snap_ids.end() && it->first.first == in_snap_namespace) { return it->second; @@ -261,10 +261,10 @@ int group_snap_remove_by_record(librados::IoCtx& group_ioctx, on_finishes[i] = new C_SaferCond; std::string snap_name; - ictx->image_lock.get_read(); + ictx->image_lock.lock_shared(); snap_t snap_id = get_group_snap_id(ictx, ne); r = ictx->get_snap_name(snap_id, &snap_name); - ictx->image_lock.put_read(); + ictx->image_lock.unlock_shared(); if (r >= 0) { ldout(cct, 20) << "removing individual snapshot from image " << ictx->name @@ -363,14 +363,14 @@ int group_snap_rollback_by_record(librados::IoCtx& group_ioctx, ldout(cct, 20) << "Requesting exclusive locks for images" << dendl; for (auto ictx: ictxs) { - RWLock::RLocker owner_lock(ictx->owner_lock); + std::shared_lock owner_lock{ictx->owner_lock}; if (ictx->exclusive_lock != nullptr) { ictx->exclusive_lock->block_requests(-EBUSY); } } for (int i = 0; i < snap_count; ++i) { ImageCtx *ictx = ictxs[i]; - 
RWLock::RLocker owner_lock(ictx->owner_lock); + std::shared_lock owner_lock{ictx->owner_lock}; on_finishes[i] = new C_SaferCond; if (ictx->exclusive_lock != nullptr) { @@ -398,12 +398,12 @@ int group_snap_rollback_by_record(librados::IoCtx& group_ioctx, ImageCtx *ictx = ictxs[i]; on_finishes[i] = new C_SaferCond; - RWLock::RLocker owner_locker(ictx->owner_lock); + std::shared_lock owner_locker{ictx->owner_lock}; std::string snap_name; - ictx->image_lock.get_read(); + ictx->image_lock.lock_shared(); snap_t snap_id = get_group_snap_id(ictx, ne); r = ictx->get_snap_name(snap_id, &snap_name); - ictx->image_lock.put_read(); + ictx->image_lock.unlock_shared(); if (r >= 0) { ldout(cct, 20) << "rolling back to individual snapshot for image " << ictx->name @@ -928,14 +928,14 @@ int Group::snap_create(librados::IoCtx& group_ioctx, ldout(cct, 20) << "Requesting exclusive locks for images" << dendl; for (auto ictx: ictxs) { - RWLock::RLocker owner_lock(ictx->owner_lock); + std::shared_lock owner_lock{ictx->owner_lock}; if (ictx->exclusive_lock != nullptr) { ictx->exclusive_lock->block_requests(-EBUSY); } } for (int i = 0; i < image_count; ++i) { ImageCtx *ictx = ictxs[i]; - RWLock::RLocker owner_lock(ictx->owner_lock); + std::shared_lock owner_lock{ictx->owner_lock}; on_finishes[i] = new C_SaferCond; if (ictx->exclusive_lock != nullptr) { @@ -980,9 +980,9 @@ int Group::snap_create(librados::IoCtx& group_ioctx, ret_code = r; } else { ImageCtx *ictx = ictxs[i]; - ictx->image_lock.get_read(); + ictx->image_lock.lock_shared(); snap_t snap_id = get_group_snap_id(ictx, ne); - ictx->image_lock.put_read(); + ictx->image_lock.unlock_shared(); if (snap_id == CEPH_NOSNAP) { ldout(cct, 20) << "Couldn't find created snapshot with namespace: " << ne << dendl; @@ -1018,10 +1018,10 @@ remove_image_snaps: on_finishes[i] = new C_SaferCond; std::string snap_name; - ictx->image_lock.get_read(); + ictx->image_lock.lock_shared(); snap_t snap_id = get_group_snap_id(ictx, ne); r = 
ictx->get_snap_name(snap_id, &snap_name); - ictx->image_lock.put_read(); + ictx->image_lock.unlock_shared(); if (r >= 0) { ictx->operations->snap_remove(ne, snap_name.c_str(), on_finishes[i]); } else { diff --git a/src/librbd/api/Image.cc b/src/librbd/api/Image.cc index 78140565386e..69e9f80b29c3 100644 --- a/src/librbd/api/Image.cc +++ b/src/librbd/api/Image.cc @@ -86,7 +86,7 @@ int Image::get_op_features(I *ictx, uint64_t *op_features) { return r; } - RWLock::RLocker image_locker(ictx->image_lock); + std::shared_lock image_locker{ictx->image_lock}; *op_features = ictx->op_features; return 0; } @@ -200,12 +200,12 @@ int Image::get_parent(I *ictx, return r; } - RWLock::RLocker image_locker(ictx->image_lock); + std::shared_lock image_locker{ictx->image_lock}; bool release_image_lock = false; BOOST_SCOPE_EXIT_ALL(ictx, &release_image_lock) { if (release_image_lock) { - ictx->parent->image_lock.put_read(); + ictx->parent->image_lock.unlock_shared(); } }; @@ -214,7 +214,7 @@ int Image::get_parent(I *ictx, auto parent = ictx->parent; if (!ictx->migration_info.empty() && ictx->parent != nullptr) { release_image_lock = true; - ictx->parent->image_lock.get_read(); + ictx->parent->image_lock.lock_shared(); parent = ictx->parent->parent; } @@ -227,7 +227,7 @@ int Image::get_parent(I *ictx, parent_image->pool_name = parent->md_ctx.get_pool_name(); parent_image->pool_namespace = parent->md_ctx.get_namespace(); - RWLock::RLocker parent_image_locker(parent->image_lock); + std::shared_lock parent_image_locker{parent->image_lock}; parent_snap->id = parent->snap_id; parent_snap->namespace_type = RBD_SNAP_NAMESPACE_TYPE_USER; if (parent->snap_id != CEPH_NOSNAP) { @@ -309,7 +309,7 @@ template int Image::list_descendants( I *ictx, const std::optional &max_level, std::vector *images) { - RWLock::RLocker l(ictx->image_lock); + std::shared_lock l{ictx->image_lock}; std::vector snap_ids; if (ictx->snap_id != CEPH_NOSNAP) { snap_ids.push_back(ictx->snap_id); @@ -516,7 +516,7 @@ int 
Image::deep_copy(I *src, librados::IoCtx& dest_md_ctx, uint64_t features; uint64_t src_size; { - RWLock::RLocker image_locker(src->image_lock); + std::shared_lock image_locker{src->image_lock}; if (!src->migration_info.empty()) { lderr(cct) << "cannot deep copy migrating image" << dendl; @@ -563,7 +563,7 @@ int Image::deep_copy(I *src, librados::IoCtx& dest_md_ctx, if (flatten > 0) { parent_spec.pool_id = -1; } else { - RWLock::RLocker image_locker(src->image_lock); + std::shared_lock image_locker{src->image_lock}; // use oldest snapshot or HEAD for parent spec if (!src->snap_info.empty()) { @@ -610,7 +610,7 @@ int Image::deep_copy(I *src, librados::IoCtx& dest_md_ctx, C_SaferCond lock_ctx; { - RWLock::WLocker locker(dest->owner_lock); + std::unique_lock locker{dest->owner_lock}; if (dest->exclusive_lock == nullptr || dest->exclusive_lock->is_lock_owner()) { @@ -644,7 +644,7 @@ int Image::deep_copy(I *src, I *dest, bool flatten, librados::snap_t snap_id_start = 0; librados::snap_t snap_id_end; { - RWLock::RLocker image_locker(src->image_lock); + std::shared_lock image_locker{src->image_lock}; snap_id_end = src->snap_id; } @@ -680,7 +680,7 @@ int Image::snap_set(I *ictx, uint64_t snap_id = CEPH_NOSNAP; std::string name(snap_name == nullptr ? 
"" : snap_name); if (!name.empty()) { - RWLock::RLocker image_locker(ictx->image_lock); + std::shared_lock image_locker{ictx->image_lock}; snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace{}, snap_name); if (snap_id == CEPH_NOSNAP) { diff --git a/src/librbd/api/Migration.cc b/src/librbd/api/Migration.cc index b0d80c94a49b..b77039e70971 100644 --- a/src/librbd/api/Migration.cc +++ b/src/librbd/api/Migration.cc @@ -6,6 +6,7 @@ #include "include/stringify.h" #include "common/dout.h" #include "common/errno.h" +#include "common/Cond.h" #include "cls/rbd/cls_rbd_client.h" #include "librbd/ExclusiveLock.h" #include "librbd/ImageCtx.h" @@ -62,8 +63,9 @@ public: ProgressContext *prog_ctx) : m_io_ctx(io_ctx), m_header_oid(header_oid), m_state(state), m_prog_ctx(prog_ctx), m_cct(reinterpret_cast(io_ctx.cct())), - m_lock(util::unique_lock_name("librbd::api::MigrationProgressContext", - this)) { + m_lock(ceph::make_mutex( + util::unique_lock_name("librbd::api::MigrationProgressContext", + this))) { ceph_assert(m_prog_ctx != nullptr); } @@ -90,14 +92,14 @@ private: ProgressContext *m_prog_ctx; CephContext* m_cct; - mutable Mutex m_lock; - Cond m_cond; + mutable ceph::mutex m_lock; + ceph::condition_variable m_cond; std::string m_state_description; bool m_pending_update = false; int m_in_flight_state_updates = 0; void send_state_description_update(const std::string &description) { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (description == m_state_description) { return; @@ -116,7 +118,7 @@ private: void set_state_description() { ldout(m_cct, 20) << "state_description=" << m_state_description << dendl; - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); librados::ObjectWriteOperation op; cls_client::migration_set_state(&op, m_state, m_state_description); @@ -134,7 +136,7 @@ private: void handle_set_state_description(int r) { ldout(m_cct, 20) << "r=" << r << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard 
locker{m_lock}; m_in_flight_state_updates--; @@ -145,20 +147,17 @@ private: set_state_description(); m_pending_update = false; } else { - m_cond.Signal(); + m_cond.notify_all(); } } void wait_for_in_flight_updates() { - Mutex::Locker locker(m_lock); + std::unique_lock locker{m_lock}; ldout(m_cct, 20) << "m_in_flight_state_updates=" << m_in_flight_state_updates << dendl; - m_pending_update = false; - while (m_in_flight_state_updates > 0) { - m_cond.Wait(m_lock); - } + m_cond.wait(locker, [this] { return m_in_flight_state_updates <= 0; }); } }; @@ -389,7 +388,7 @@ int Migration::prepare(librados::IoCtx& io_ctx, uint64_t features; { - RWLock::RLocker image_locker(image_ctx->image_lock); + std::shared_lock image_locker{image_ctx->image_lock}; features = image_ctx->features; } opts.get(RBD_IMAGE_OPTION_FEATURES, &features); @@ -718,7 +717,7 @@ int Migration::execute() { m_prog_ctx); r = dst_image_ctx->operations->migrate(prog_ctx); if (r == -EROFS) { - RWLock::RLocker owner_locker(dst_image_ctx->owner_lock); + std::shared_lock owner_locker{dst_image_ctx->owner_lock}; if (dst_image_ctx->exclusive_lock != nullptr && !dst_image_ctx->exclusive_lock->accept_ops()) { ldout(m_cct, 5) << "lost exclusive lock, retrying remote" << dendl; @@ -750,12 +749,12 @@ int Migration::abort() { int r; - m_src_image_ctx->owner_lock.get_read(); + m_src_image_ctx->owner_lock.lock_shared(); if (m_src_image_ctx->exclusive_lock != nullptr && !m_src_image_ctx->exclusive_lock->is_lock_owner()) { C_SaferCond ctx; m_src_image_ctx->exclusive_lock->acquire_lock(&ctx); - m_src_image_ctx->owner_lock.put_read(); + m_src_image_ctx->owner_lock.unlock_shared(); r = ctx.wait(); if (r < 0) { lderr(m_cct) << "error acquiring exclusive lock: " << cpp_strerror(r) @@ -763,7 +762,7 @@ int Migration::abort() { return r; } } else { - m_src_image_ctx->owner_lock.put_read(); + m_src_image_ctx->owner_lock.unlock_shared(); } group_info_t group_info; @@ -1026,7 +1025,7 @@ int Migration::validate_src_snaps() { } for (auto 
&snap : snaps) { - RWLock::RLocker image_locker(m_src_image_ctx->image_lock); + std::shared_lock image_locker{m_src_image_ctx->image_lock}; cls::rbd::ParentImageSpec parent_spec{m_src_image_ctx->md_ctx.get_id(), m_src_image_ctx->md_ctx.get_namespace(), m_src_image_ctx->id, snap.id}; @@ -1121,12 +1120,12 @@ template int Migration::v2_unlink_src_image() { ldout(m_cct, 10) << dendl; - m_src_image_ctx->owner_lock.get_read(); + m_src_image_ctx->owner_lock.lock_shared(); if (m_src_image_ctx->exclusive_lock != nullptr && m_src_image_ctx->exclusive_lock->is_lock_owner()) { C_SaferCond ctx; m_src_image_ctx->exclusive_lock->release_lock(&ctx); - m_src_image_ctx->owner_lock.put_read(); + m_src_image_ctx->owner_lock.unlock_shared(); int r = ctx.wait(); if (r < 0) { lderr(m_cct) << "error releasing exclusive lock: " << cpp_strerror(r) @@ -1134,7 +1133,7 @@ int Migration::v2_unlink_src_image() { return r; } } else { - m_src_image_ctx->owner_lock.put_read(); + m_src_image_ctx->owner_lock.unlock_shared(); } int r = Trash::move(m_src_io_ctx, RBD_TRASH_IMAGE_SOURCE_MIGRATION, @@ -1193,7 +1192,7 @@ int Migration::create_dst_image() { uint64_t size; cls::rbd::ParentImageSpec parent_spec; { - RWLock::RLocker image_locker(m_src_image_ctx->image_lock); + std::shared_lock image_locker{m_src_image_ctx->image_lock}; size = m_src_image_ctx->size; // use oldest snapshot or HEAD for parent spec @@ -1256,7 +1255,7 @@ int Migration::create_dst_image() { } BOOST_SCOPE_EXIT_END; { - RWLock::RLocker owner_locker(dst_image_ctx->owner_lock); + std::shared_lock owner_locker{dst_image_ctx->owner_lock}; r = dst_image_ctx->operations->prepare_image_update(true); if (r < 0) { lderr(m_cct) << "cannot obtain exclusive lock" << dendl; @@ -1541,7 +1540,7 @@ int Migration::relink_children(I *from_image_ctx, I *to_image_ctx) { // Also collect the list of the children currently attached to the // source, so we could make a proper decision later about relinking. 
- RWLock::RLocker src_image_locker(to_image_ctx->image_lock); + std::shared_lock src_image_locker{to_image_ctx->image_lock}; cls::rbd::ParentImageSpec src_parent_spec{to_image_ctx->md_ctx.get_id(), to_image_ctx->md_ctx.get_namespace(), to_image_ctx->id, snap.id}; @@ -1553,7 +1552,7 @@ int Migration::relink_children(I *from_image_ctx, I *to_image_ctx) { return r; } - RWLock::RLocker image_locker(from_image_ctx->image_lock); + std::shared_lock image_locker{from_image_ctx->image_lock}; snap.id = from_image_ctx->get_snap_id(cls::rbd::UserSnapshotNamespace(), snap.name); if (snap.id == CEPH_NOSNAP) { @@ -1564,7 +1563,7 @@ int Migration::relink_children(I *from_image_ctx, I *to_image_ctx) { std::vector child_images; { - RWLock::RLocker image_locker(from_image_ctx->image_lock); + std::shared_lock image_locker{from_image_ctx->image_lock}; cls::rbd::ParentImageSpec parent_spec{from_image_ctx->md_ctx.get_id(), from_image_ctx->md_ctx.get_namespace(), from_image_ctx->id, snap.id}; @@ -1614,7 +1613,7 @@ int Migration::relink_child(I *from_image_ctx, I *to_image_ctx, librados::snap_t to_snap_id; { - RWLock::RLocker image_locker(to_image_ctx->image_lock); + std::shared_lock image_locker{to_image_ctx->image_lock}; to_snap_id = to_image_ctx->get_snap_id(cls::rbd::UserSnapshotNamespace(), from_snap.name); if (to_snap_id == CEPH_NOSNAP) { @@ -1652,7 +1651,7 @@ int Migration::relink_child(I *from_image_ctx, I *to_image_ctx, cls::rbd::ParentImageSpec parent_spec; uint64_t parent_overlap; { - RWLock::RLocker image_locker(child_image_ctx->image_lock); + std::shared_lock image_locker{child_image_ctx->image_lock}; // use oldest snapshot or HEAD for parent spec if (!child_image_ctx->snap_info.empty()) { diff --git a/src/librbd/api/Mirror.cc b/src/librbd/api/Mirror.cc index ebdf6acef479..4e67c5123fd8 100644 --- a/src/librbd/api/Mirror.cc +++ b/src/librbd/api/Mirror.cc @@ -195,7 +195,7 @@ int Mirror::image_enable(I *ictx, bool relax_same_pool_parent_check) { // is mirroring not enabled for 
the parent? { - RWLock::RLocker image_locker(ictx->image_lock); + std::shared_lock image_locker{ictx->image_lock}; ImageCtx *parent = ictx->parent; if (parent) { if (relax_same_pool_parent_check && @@ -305,7 +305,7 @@ int Mirror::image_disable(I *ictx, bool force) { }; { - RWLock::RLocker l(ictx->image_lock); + std::shared_lock l{ictx->image_lock}; map snap_info = ictx->snap_info; for (auto &info : snap_info) { cls::rbd::ParentImageSpec parent_spec{ictx->md_ctx.get_id(), diff --git a/src/librbd/api/Pool.cc b/src/librbd/api/Pool.cc index 88adf561d200..708f797ec698 100644 --- a/src/librbd/api/Pool.cc +++ b/src/librbd/api/Pool.cc @@ -5,6 +5,7 @@ #include "include/rados/librados.hpp" #include "common/dout.h" #include "common/errno.h" +#include "common/Cond.h" #include "common/Throttle.h" #include "cls/rbd/cls_rbd_client.h" #include "osd/osd_types.h" diff --git a/src/librbd/api/Snapshot.cc b/src/librbd/api/Snapshot.cc index a3c314c97626..4d24f91a6159 100644 --- a/src/librbd/api/Snapshot.cc +++ b/src/librbd/api/Snapshot.cc @@ -105,7 +105,7 @@ int Snapshot::get_group_namespace(I *ictx, uint64_t snap_id, return r; } - RWLock::RLocker image_locker(ictx->image_lock); + std::shared_lock image_locker{ictx->image_lock}; auto snap_info = ictx->get_snap_info(snap_id); if (snap_info == nullptr) { return -ENOENT; @@ -128,7 +128,7 @@ int Snapshot::get_trash_namespace(I *ictx, uint64_t snap_id, return r; } - RWLock::RLocker image_locker(ictx->image_lock); + std::shared_lock image_locker{ictx->image_lock}; auto snap_info = ictx->get_snap_info(snap_id); if (snap_info == nullptr) { return -ENOENT; @@ -151,7 +151,7 @@ int Snapshot::get_namespace_type(I *ictx, uint64_t snap_id, return r; } - RWLock::RLocker l(ictx->image_lock); + std::shared_lock l{ictx->image_lock}; auto snap_info = ictx->get_snap_info(snap_id); if (snap_info == nullptr) { return -ENOENT; @@ -174,7 +174,7 @@ int Snapshot::remove(I *ictx, uint64_t snap_id) { cls::rbd::SnapshotNamespace snapshot_namespace; std::string 
snapshot_name; { - RWLock::RLocker image_locker(ictx->image_lock); + std::shared_lock image_locker{ictx->image_lock}; auto it = ictx->snap_info.find(snap_id); if (it == ictx->snap_info.end()) { return -ENOENT; diff --git a/src/librbd/api/Trash.cc b/src/librbd/api/Trash.cc index b8deddfb2bda..c384d534f7d0 100644 --- a/src/librbd/api/Trash.cc +++ b/src/librbd/api/Trash.cc @@ -137,32 +137,32 @@ int Trash::move(librados::IoCtx &io_ctx, rbd_trash_image_source_t source, if (r == 0) { if (ictx->test_features(RBD_FEATURE_JOURNALING)) { - RWLock::WLocker image_locker(ictx->image_lock); + std::unique_lock image_locker{ictx->image_lock}; ictx->set_journal_policy(new journal::DisabledPolicy()); } - ictx->owner_lock.get_read(); + ictx->owner_lock.lock_shared(); if (ictx->exclusive_lock != nullptr) { ictx->exclusive_lock->block_requests(0); r = ictx->operations->prepare_image_update(false); if (r < 0) { lderr(cct) << "cannot obtain exclusive lock - not removing" << dendl; - ictx->owner_lock.put_read(); + ictx->owner_lock.unlock_shared(); ictx->state->close(); return -EBUSY; } } - ictx->owner_lock.put_read(); + ictx->owner_lock.unlock_shared(); - ictx->image_lock.get_read(); + ictx->image_lock.lock_shared(); if (!ictx->migration_info.empty()) { lderr(cct) << "cannot move migrating image to trash" << dendl; - ictx->image_lock.put_read(); + ictx->image_lock.unlock_shared(); ictx->state->close(); return -EBUSY; } - ictx->image_lock.put_read(); + ictx->image_lock.unlock_shared(); r = disable_mirroring(ictx); if (r < 0) { diff --git a/src/librbd/cache/ObjectCacherObjectDispatch.cc b/src/librbd/cache/ObjectCacherObjectDispatch.cc index 5e6f9dcf5b7b..0b57201bbbb8 100644 --- a/src/librbd/cache/ObjectCacherObjectDispatch.cc +++ b/src/librbd/cache/ObjectCacherObjectDispatch.cc @@ -45,7 +45,7 @@ struct ObjectCacherObjectDispatch::C_InvalidateCache : public Context { } void finish(int r) override { - ceph_assert(dispatcher->m_cache_lock.is_locked()); + 
ceph_assert(ceph_mutex_is_locked(dispatcher->m_cache_lock)); auto cct = dispatcher->m_image_ctx->cct; if (r == -EBLACKLISTED) { @@ -80,8 +80,8 @@ ObjectCacherObjectDispatch::ObjectCacherObjectDispatch( I* image_ctx, size_t max_dirty, bool writethrough_until_flush) : m_image_ctx(image_ctx), m_max_dirty(max_dirty), m_writethrough_until_flush(writethrough_until_flush), - m_cache_lock(util::unique_lock_name( - "librbd::cache::ObjectCacherObjectDispatch::cache_lock", this)) { + m_cache_lock(ceph::make_mutex(util::unique_lock_name( + "librbd::cache::ObjectCacherObjectDispatch::cache_lock", this))) { } template @@ -97,7 +97,7 @@ void ObjectCacherObjectDispatch::init() { auto cct = m_image_ctx->cct; ldout(cct, 5) << dendl; - m_cache_lock.Lock(); + m_cache_lock.lock(); ldout(cct, 5) << "enabling caching..." << dendl; m_writeback_handler = new ObjectCacherWriteback(m_image_ctx, m_cache_lock); @@ -144,7 +144,7 @@ void ObjectCacherObjectDispatch::init() { m_object_set = new ObjectCacher::ObjectSet(nullptr, m_image_ctx->data_ctx.get_id(), 0); m_object_cacher->start(); - m_cache_lock.Unlock(); + m_cache_lock.unlock(); // add ourself to the IO object dispatcher chain if (m_max_dirty > 0) { @@ -173,10 +173,9 @@ void ObjectCacherObjectDispatch::shut_down(Context* on_finish) { on_finish = new C_InvalidateCache(this, true, on_finish); // flush all pending writeback state - m_cache_lock.Lock(); + std::lock_guard locker{m_cache_lock}; m_object_cacher->release_set(m_object_set); m_object_cacher->flush_set(m_object_set, on_finish); - m_cache_lock.Unlock(); } template @@ -195,9 +194,9 @@ bool ObjectCacherObjectDispatch::read( on_dispatched = util::create_async_context_callback(*m_image_ctx, on_dispatched); - m_image_ctx->image_lock.get_read(); + m_image_ctx->image_lock.lock_shared(); auto rd = m_object_cacher->prepare_read(snap_id, read_data, op_flags); - m_image_ctx->image_lock.put_read(); + m_image_ctx->image_lock.unlock_shared(); ObjectExtent extent(data_object_name(m_image_ctx, 
object_no), object_no, object_off, object_len, 0); @@ -208,9 +207,9 @@ bool ObjectCacherObjectDispatch::read( ZTracer::Trace trace(parent_trace); *dispatch_result = io::DISPATCH_RESULT_COMPLETE; - m_cache_lock.Lock(); + m_cache_lock.lock(); int r = m_object_cacher->readx(rd, m_object_set, on_dispatched, &trace); - m_cache_lock.Unlock(); + m_cache_lock.unlock(); if (r != 0) { on_dispatched->complete(r); } @@ -237,9 +236,9 @@ bool ObjectCacherObjectDispatch::discard( auto ctx = *on_finish; *on_finish = new FunctionContext( [this, object_extents, ctx](int r) { - m_cache_lock.Lock(); + m_cache_lock.lock(); m_object_cacher->discard_set(m_object_set, object_extents); - m_cache_lock.Unlock(); + m_cache_lock.unlock(); ctx->complete(r); }); @@ -252,10 +251,9 @@ bool ObjectCacherObjectDispatch::discard( // ensure any in-flight writeback is complete before advancing // the discard request - m_cache_lock.Lock(); + std::lock_guard locker{m_cache_lock}; m_object_cacher->discard_writeback(m_object_set, object_extents, on_dispatched); - m_cache_lock.Unlock(); return true; } @@ -274,10 +272,10 @@ bool ObjectCacherObjectDispatch::write( on_dispatched = util::create_async_context_callback(*m_image_ctx, on_dispatched); - m_image_ctx->image_lock.get_read(); + m_image_ctx->image_lock.lock_shared(); ObjectCacher::OSDWrite *wr = m_object_cacher->prepare_write( snapc, data, ceph::real_time::min(), op_flags, *journal_tid); - m_image_ctx->image_lock.put_read(); + m_image_ctx->image_lock.unlock_shared(); ObjectExtent extent(data_object_name(m_image_ctx, object_no), object_no, object_off, data.length(), 0); @@ -288,9 +286,8 @@ bool ObjectCacherObjectDispatch::write( ZTracer::Trace trace(parent_trace); *dispatch_result = io::DISPATCH_RESULT_COMPLETE; - m_cache_lock.Lock(); + std::lock_guard locker{m_cache_lock}; m_object_cacher->writex(wr, m_object_set, on_dispatched, &trace); - m_cache_lock.Unlock(); return true; } @@ -345,7 +342,7 @@ bool ObjectCacherObjectDispatch::compare_and_write( 
object_extents.emplace_back(data_object_name(m_image_ctx, object_no), object_no, object_off, cmp_data.length(), 0); - Mutex::Locker cache_locker(m_cache_lock); + std::lock_guard cache_locker{m_cache_lock}; m_object_cacher->flush_set(m_object_set, object_extents, &trace, on_dispatched); return true; @@ -363,7 +360,7 @@ bool ObjectCacherObjectDispatch::flush( on_dispatched = util::create_async_context_callback(*m_image_ctx, on_dispatched); - m_cache_lock.Lock(); + std::lock_guard locker{m_cache_lock}; if (flush_source == io::FLUSH_SOURCE_USER && !m_user_flushed) { m_user_flushed = true; if (m_writethrough_until_flush && m_max_dirty > 0) { @@ -374,7 +371,6 @@ bool ObjectCacherObjectDispatch::flush( *dispatch_result = io::DISPATCH_RESULT_CONTINUE; m_object_cacher->flush_set(m_object_set, on_dispatched); - m_cache_lock.Unlock(); return true; } @@ -389,10 +385,9 @@ bool ObjectCacherObjectDispatch::invalidate_cache(Context* on_finish) { // invalidate any remaining cache entries on_finish = new C_InvalidateCache(this, false, on_finish); - m_cache_lock.Lock(); + std::lock_guard locker{m_cache_lock}; m_object_cacher->release_set(m_object_set); m_object_cacher->flush_set(m_object_set, on_finish); - m_cache_lock.Unlock(); return true; } @@ -402,10 +397,8 @@ bool ObjectCacherObjectDispatch::reset_existence_cache( auto cct = m_image_ctx->cct; ldout(cct, 5) << dendl; - m_cache_lock.Lock(); + std::lock_guard locker{m_cache_lock}; m_object_cacher->clear_nonexistence(m_object_set); - m_cache_lock.Unlock(); - return false; } diff --git a/src/librbd/cache/ObjectCacherObjectDispatch.h b/src/librbd/cache/ObjectCacherObjectDispatch.h index 22a9cf3586f4..2c62e31f07ce 100644 --- a/src/librbd/cache/ObjectCacherObjectDispatch.h +++ b/src/librbd/cache/ObjectCacherObjectDispatch.h @@ -5,7 +5,7 @@ #define CEPH_LIBRBD_CACHE_OBJECT_CACHER_OBJECT_DISPATCH_H #include "librbd/io/ObjectDispatchInterface.h" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "osdc/ObjectCacher.h" 
struct WritebackHandler; @@ -99,7 +99,7 @@ private: size_t m_max_dirty; bool m_writethrough_until_flush; - Mutex m_cache_lock; + ceph::mutex m_cache_lock; ObjectCacher *m_object_cacher = nullptr; ObjectCacher::ObjectSet *m_object_set = nullptr; diff --git a/src/librbd/cache/ObjectCacherWriteback.cc b/src/librbd/cache/ObjectCacherWriteback.cc index a59203b32208..c6e26506a3a1 100644 --- a/src/librbd/cache/ObjectCacherWriteback.cc +++ b/src/librbd/cache/ObjectCacherWriteback.cc @@ -6,7 +6,7 @@ #include "librbd/cache/ObjectCacherWriteback.h" #include "common/ceph_context.h" #include "common/dout.h" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "common/WorkQueue.h" #include "osdc/Striper.h" #include "include/Context.h" @@ -42,13 +42,13 @@ namespace cache { */ class C_ReadRequest : public Context { public: - C_ReadRequest(CephContext *cct, Context *c, Mutex *cache_lock) + C_ReadRequest(CephContext *cct, Context *c, ceph::mutex *cache_lock) : m_cct(cct), m_ctx(c), m_cache_lock(cache_lock) { } void finish(int r) override { ldout(m_cct, 20) << "aio_cb completing " << dendl; { - Mutex::Locker cache_locker(*m_cache_lock); + std::lock_guard cache_locker{*m_cache_lock}; m_ctx->complete(r); } ldout(m_cct, 20) << "aio_cb finished" << dendl; @@ -56,7 +56,7 @@ public: private: CephContext *m_cct; Context *m_ctx; - Mutex *m_cache_lock; + ceph::mutex *m_cache_lock; }; class C_OrderedWrite : public Context { @@ -69,7 +69,7 @@ public: void finish(int r) override { ldout(m_cct, 20) << "C_OrderedWrite completing " << m_result << dendl; { - Mutex::Locker l(m_wb_handler->m_lock); + std::lock_guard l{m_wb_handler->m_lock}; ceph_assert(!m_result->done); m_result->done = true; m_result->ret = r; @@ -105,7 +105,7 @@ struct C_CommitIOEventExtent : public Context { } }; -ObjectCacherWriteback::ObjectCacherWriteback(ImageCtx *ictx, Mutex& lock) +ObjectCacherWriteback::ObjectCacherWriteback(ImageCtx *ictx, ceph::mutex& lock) : m_tid(0), m_lock(lock), m_ictx(ictx) { } @@ 
-147,11 +147,11 @@ bool ObjectCacherWriteback::may_copy_on_write(const object_t& oid, uint64_t read_len, snapid_t snapid) { - m_ictx->image_lock.get_read(); + m_ictx->image_lock.lock_shared(); librados::snap_t snap_id = m_ictx->snap_id; uint64_t overlap = 0; m_ictx->get_parent_overlap(snap_id, &overlap); - m_ictx->image_lock.put_read(); + m_ictx->image_lock.unlock_shared(); uint64_t object_no = oid_to_object_no(oid.name, m_ictx->object_prefix); @@ -244,7 +244,7 @@ void ObjectCacherWriteback::overwrite_extent(const object_t& oid, uint64_t off, void ObjectCacherWriteback::complete_writes(const std::string& oid) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); std::queue& results = m_writes[oid]; ldout(m_ictx->cct, 20) << "complete_writes() oid " << oid << dendl; std::list finished; diff --git a/src/librbd/cache/ObjectCacherWriteback.h b/src/librbd/cache/ObjectCacherWriteback.h index 2addaba5bdf4..6d7f367b614f 100644 --- a/src/librbd/cache/ObjectCacherWriteback.h +++ b/src/librbd/cache/ObjectCacherWriteback.h @@ -9,7 +9,6 @@ #include "osdc/WritebackHandler.h" #include -class Mutex; class Context; namespace librbd { @@ -20,7 +19,7 @@ namespace cache { class ObjectCacherWriteback : public WritebackHandler { public: - ObjectCacherWriteback(ImageCtx *ictx, Mutex& lock); + ObjectCacherWriteback(ImageCtx *ictx, ceph::mutex& lock); // Note that oloc, trunc_size, and trunc_seq are ignored void read(const object_t& oid, uint64_t object_no, @@ -64,7 +63,7 @@ private: void complete_writes(const std::string& oid); ceph_tid_t m_tid; - Mutex& m_lock; + ceph::mutex& m_lock; librbd::ImageCtx *m_ictx; ceph::unordered_map > m_writes; friend class C_OrderedWrite; diff --git a/src/librbd/cache/ParentCacheObjectDispatch.h b/src/librbd/cache/ParentCacheObjectDispatch.h index d576659a1a27..475a5ad15fe9 100644 --- a/src/librbd/cache/ParentCacheObjectDispatch.h +++ b/src/librbd/cache/ParentCacheObjectDispatch.h @@ -4,7 +4,6 @@ #ifndef 
CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H #define CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H -#include "common/Mutex.h" #include "librbd/io/ObjectDispatchInterface.h" #include "tools/immutable_object_cache/CacheClient.h" #include "librbd/cache/TypeTraits.h" diff --git a/src/librbd/cache/WriteAroundObjectDispatch.cc b/src/librbd/cache/WriteAroundObjectDispatch.cc index 88e9e218453f..06eab207bcd1 100644 --- a/src/librbd/cache/WriteAroundObjectDispatch.cc +++ b/src/librbd/cache/WriteAroundObjectDispatch.cc @@ -24,8 +24,8 @@ template WriteAroundObjectDispatch::WriteAroundObjectDispatch( I* image_ctx, size_t max_dirty, bool writethrough_until_flush) : m_image_ctx(image_ctx), m_init_max_dirty(max_dirty), m_max_dirty(max_dirty), - m_lock(util::unique_lock_name( - "librbd::cache::WriteAroundObjectDispatch::lock", this)) { + m_lock(ceph::make_mutex(util::unique_lock_name( + "librbd::cache::WriteAroundObjectDispatch::lock", this))) { if (writethrough_until_flush) { m_max_dirty = 0; } @@ -132,7 +132,7 @@ bool WriteAroundObjectDispatch::flush( auto cct = m_image_ctx->cct; ldout(cct, 20) << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (flush_source == io::FLUSH_SOURCE_USER && !m_user_flushed) { m_user_flushed = true; if (m_max_dirty == 0 && m_init_max_dirty > 0) { @@ -173,12 +173,12 @@ bool WriteAroundObjectDispatch::dispatch_unoptimized_io( io::DispatchResult* dispatch_result, Context* on_dispatched) { auto cct = m_image_ctx->cct; - m_lock.Lock(); + m_lock.lock(); auto in_flight_extents_it = m_in_flight_extents.find(object_no); if (in_flight_extents_it == m_in_flight_extents.end() || !in_flight_extents_it->second.intersects(object_off, object_len)) { // no IO in-flight to the specified extent - m_lock.Unlock(); + m_lock.unlock(); return false; } @@ -189,7 +189,7 @@ bool WriteAroundObjectDispatch::dispatch_unoptimized_io( *dispatch_result = io::DISPATCH_RESULT_CONTINUE; m_blocked_unoptimized_ios[object_no].emplace( tid, 
BlockedIO{object_off, object_len, nullptr, on_dispatched}); - m_lock.Unlock(); + m_lock.unlock(); return true; } @@ -201,16 +201,16 @@ bool WriteAroundObjectDispatch::dispatch_io( Context* on_dispatched) { auto cct = m_image_ctx->cct; - m_lock.Lock(); + m_lock.lock(); if (m_max_dirty == 0) { // write-through mode is active -- no-op the cache - m_lock.Unlock(); + m_lock.unlock(); return false; } if ((op_flags & LIBRADOS_OP_FLAG_FADVISE_FUA) != 0) { // force unit access flag is set -- disable write-around - m_lock.Unlock(); + m_lock.unlock(); return dispatch_unoptimized_io(object_no, object_off, object_len, dispatch_result, on_dispatched); } @@ -231,9 +231,9 @@ bool WriteAroundObjectDispatch::dispatch_io( m_queued_or_blocked_io_tids.insert(tid); m_blocked_ios[object_no].emplace(tid, BlockedIO{object_off, object_len, ctx, on_dispatched}); - m_lock.Unlock(); + m_lock.unlock(); } else if (can_dispatch_io(tid, object_len)) { - m_lock.Unlock(); + m_lock.unlock(); ldout(cct, 20) << "dispatching: tid=" << tid << dendl; on_dispatched->complete(0); @@ -242,7 +242,7 @@ bool WriteAroundObjectDispatch::dispatch_io( ldout(cct, 20) << "queueing: tid=" << tid << dendl; m_queued_or_blocked_io_tids.insert(tid); m_queued_ios.emplace(tid, QueuedIO{object_len, ctx, on_dispatched}); - m_lock.Unlock(); + m_lock.unlock(); } return true; } @@ -264,7 +264,7 @@ void WriteAroundObjectDispatch::unblock_overlapping_ios( uint64_t object_no, uint64_t object_off, uint64_t object_len, Contexts* unoptimized_io_dispatches) { auto cct = m_image_ctx->cct; - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); auto in_flight_extents_it = m_in_flight_extents.find(object_no); ceph_assert(in_flight_extents_it != m_in_flight_extents.end()); @@ -337,7 +337,7 @@ void WriteAroundObjectDispatch::unblock_overlapping_ios( template bool WriteAroundObjectDispatch::can_dispatch_io( uint64_t tid, uint64_t length) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); 
if (m_in_flight_bytes == 0 || m_in_flight_bytes + length <= m_max_dirty) { // no in-flight IO or still under max write-around in-flight limit. @@ -358,7 +358,7 @@ void WriteAroundObjectDispatch::handle_in_flight_io_complete( auto cct = m_image_ctx->cct; ldout(cct, 20) << "r=" << r << ", tid=" << tid << dendl; - m_lock.Lock(); + m_lock.lock(); m_in_flight_io_tids.erase(tid); ceph_assert(m_in_flight_bytes >= object_len); m_in_flight_bytes -= object_len; @@ -388,7 +388,7 @@ void WriteAroundObjectDispatch::handle_in_flight_io_complete( // collect any queued flushes that were tied to queued IOs auto ready_flushes = collect_ready_flushes(); - m_lock.Unlock(); + m_lock.unlock(); // dispatch any ready unoptimized IOs for (auto& it : unoptimized_io_dispatches) { @@ -424,7 +424,7 @@ void WriteAroundObjectDispatch::handle_in_flight_flush_complete( auto cct = m_image_ctx->cct; ldout(cct, 20) << "r=" << r << ", tid=" << tid << dendl; - m_lock.Lock(); + m_lock.lock(); // move the in-flight flush to the pending completion list auto it = m_in_flight_flushes.find(tid); @@ -439,7 +439,7 @@ void WriteAroundObjectDispatch::handle_in_flight_flush_complete( if (!finished_flushes.empty()) { std::swap(pending_flush_error, m_pending_flush_error); } - m_lock.Unlock(); + m_lock.unlock(); // complete flushes that were waiting on in-flight IO // (and propogate any IO errors) @@ -453,7 +453,7 @@ void WriteAroundObjectDispatch::handle_in_flight_flush_complete( template typename WriteAroundObjectDispatch::QueuedIOs WriteAroundObjectDispatch::collect_ready_ios() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); QueuedIOs queued_ios; @@ -474,7 +474,7 @@ WriteAroundObjectDispatch::collect_ready_ios() { template typename WriteAroundObjectDispatch::Contexts WriteAroundObjectDispatch::collect_ready_flushes() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); Contexts ready_flushes; auto io_tid_it = m_queued_or_blocked_io_tids.begin(); @@ 
-497,7 +497,7 @@ WriteAroundObjectDispatch::collect_ready_flushes() { template typename WriteAroundObjectDispatch::Contexts WriteAroundObjectDispatch::collect_finished_flushes() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); Contexts finished_flushes; auto io_tid_it = m_in_flight_io_tids.begin(); diff --git a/src/librbd/cache/WriteAroundObjectDispatch.h b/src/librbd/cache/WriteAroundObjectDispatch.h index 28721ce1172e..ce217d2cf452 100644 --- a/src/librbd/cache/WriteAroundObjectDispatch.h +++ b/src/librbd/cache/WriteAroundObjectDispatch.h @@ -6,7 +6,7 @@ #include "librbd/io/ObjectDispatchInterface.h" #include "include/interval_set.h" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "librbd/io/Types.h" #include #include @@ -142,7 +142,7 @@ private: size_t m_init_max_dirty; size_t m_max_dirty; - Mutex m_lock; + ceph::mutex m_lock; bool m_user_flushed = false; uint64_t m_last_tid = 0; diff --git a/src/librbd/deep_copy/ImageCopyRequest.cc b/src/librbd/deep_copy/ImageCopyRequest.cc index 32a899b3c932..705ddcb5a4b4 100644 --- a/src/librbd/deep_copy/ImageCopyRequest.cc +++ b/src/librbd/deep_copy/ImageCopyRequest.cc @@ -35,7 +35,7 @@ ImageCopyRequest::ImageCopyRequest(I *src_image_ctx, I *dst_image_ctx, m_snap_id_end(snap_id_end), m_flatten(flatten), m_object_number(object_number), m_snap_seqs(snap_seqs), m_prog_ctx(prog_ctx), m_on_finish(on_finish), m_cct(dst_image_ctx->cct), - m_lock(unique_lock_name("ImageCopyRequest::m_lock", this)) { + m_lock(ceph::make_mutex(unique_lock_name("ImageCopyRequest::m_lock", this))) { } template @@ -53,7 +53,7 @@ void ImageCopyRequest::send() { template void ImageCopyRequest::cancel() { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ldout(m_cct, 20) << dendl; m_canceled = true; @@ -68,7 +68,7 @@ void ImageCopyRequest::send_object_copies() { uint64_t size; { - RWLock::RLocker image_locker(m_src_image_ctx->image_lock); + std::shared_lock 
image_locker{m_src_image_ctx->image_lock}; size = m_src_image_ctx->get_image_size(CEPH_NOSNAP); for (auto snap_id : m_src_image_ctx->snaps) { size = std::max(size, m_src_image_ctx->get_image_size(snap_id)); @@ -81,7 +81,7 @@ void ImageCopyRequest::send_object_copies() { bool complete; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; for (uint64_t i = 0; i < m_src_image_ctx->config.template get_val("rbd_concurrent_management_ops"); ++i) { @@ -100,7 +100,7 @@ void ImageCopyRequest::send_object_copies() { template void ImageCopyRequest::send_next_object_copy() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); if (m_canceled && m_ret_val == 0) { ldout(m_cct, 10) << "image copy canceled" << dendl; @@ -132,7 +132,7 @@ void ImageCopyRequest::handle_object_copy(uint64_t object_no, int r) { bool complete; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_current_ops > 0); --m_current_ops; @@ -150,9 +150,9 @@ void ImageCopyRequest::handle_object_copy(uint64_t object_no, int r) { m_copied_objects.pop(); uint64_t progress_object_no = *m_object_number + 1; m_updating_progress = true; - m_lock.Unlock(); + m_lock.unlock(); m_prog_ctx->update_progress(progress_object_no, m_end_object_no); - m_lock.Lock(); + m_lock.lock(); ceph_assert(m_updating_progress); m_updating_progress = false; } diff --git a/src/librbd/deep_copy/ImageCopyRequest.h b/src/librbd/deep_copy/ImageCopyRequest.h index e50507e88485..15c3cd2951d2 100644 --- a/src/librbd/deep_copy/ImageCopyRequest.h +++ b/src/librbd/deep_copy/ImageCopyRequest.h @@ -6,7 +6,7 @@ #include "include/int_types.h" #include "include/rados/librados.hpp" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "common/RefCountedObj.h" #include "librbd/Types.h" #include "librbd/deep_copy/Types.h" @@ -77,7 +77,7 @@ private: Context *m_on_finish; CephContext *m_cct; - Mutex m_lock; + ceph::mutex m_lock; bool m_canceled = false; uint64_t m_object_no = 0; 
diff --git a/src/librbd/deep_copy/ObjectCopyRequest.cc b/src/librbd/deep_copy/ObjectCopyRequest.cc index 3a1de0e1c75b..15c647975f30 100644 --- a/src/librbd/deep_copy/ObjectCopyRequest.cc +++ b/src/librbd/deep_copy/ObjectCopyRequest.cc @@ -224,10 +224,10 @@ void ObjectCopyRequest::handle_read_object(int r) { template void ObjectCopyRequest::send_read_from_parent() { - m_src_image_ctx->image_lock.get_read(); + m_src_image_ctx->image_lock.lock_shared(); io::Extents image_extents; compute_read_from_parent_ops(&image_extents); - m_src_image_ctx->image_lock.put_read(); + m_src_image_ctx->image_lock.unlock_shared(); if (image_extents.empty()) { handle_read_from_parent(0); @@ -380,7 +380,7 @@ void ObjectCopyRequest::send_write_object() { int r; Context *finish_op_ctx; { - RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock); + std::shared_lock owner_locker{m_dst_image_ctx->owner_lock}; finish_op_ctx = start_lock_op(m_dst_image_ctx->owner_lock, &r); } if (finish_op_ctx == nullptr) { @@ -434,14 +434,14 @@ void ObjectCopyRequest::send_update_object_map() { return; } - m_dst_image_ctx->owner_lock.get_read(); - m_dst_image_ctx->image_lock.get_read(); + m_dst_image_ctx->owner_lock.lock_shared(); + m_dst_image_ctx->image_lock.lock_shared(); if (m_dst_image_ctx->object_map == nullptr) { // possible that exclusive lock was lost in background lderr(m_cct) << "object map is not initialized" << dendl; - m_dst_image_ctx->image_lock.put_read(); - m_dst_image_ctx->owner_lock.put_read(); + m_dst_image_ctx->image_lock.unlock_shared(); + m_dst_image_ctx->owner_lock.unlock_shared(); finish(-EINVAL); return; } @@ -460,8 +460,8 @@ void ObjectCopyRequest::send_update_object_map() { auto finish_op_ctx = start_lock_op(m_dst_image_ctx->owner_lock, &r); if (finish_op_ctx == nullptr) { lderr(m_cct) << "lost exclusive lock" << dendl; - m_dst_image_ctx->image_lock.put_read(); - m_dst_image_ctx->owner_lock.put_read(); + m_dst_image_ctx->image_lock.unlock_shared(); + 
m_dst_image_ctx->owner_lock.unlock_shared(); finish(r); return; } @@ -477,8 +477,8 @@ void ObjectCopyRequest::send_update_object_map() { {}, {}, false, ctx); // NOTE: state machine might complete before we reach here - dst_image_ctx->image_lock.put_read(); - dst_image_ctx->owner_lock.put_read(); + dst_image_ctx->image_lock.unlock_shared(); + dst_image_ctx->owner_lock.unlock_shared(); if (!sent) { ceph_assert(dst_snap_id == CEPH_NOSNAP); ctx->complete(0); @@ -503,8 +503,9 @@ void ObjectCopyRequest::handle_update_object_map(int r) { } template -Context *ObjectCopyRequest::start_lock_op(RWLock &owner_lock, int* r) { - ceph_assert(m_dst_image_ctx->owner_lock.is_locked()); +Context *ObjectCopyRequest::start_lock_op(ceph::shared_mutex &owner_lock, + int* r) { + ceph_assert(ceph_mutex_is_locked(m_dst_image_ctx->owner_lock)); if (m_dst_image_ctx->exclusive_lock == nullptr) { return new FunctionContext([](int r) {}); } @@ -573,9 +574,9 @@ void ObjectCopyRequest::compute_read_ops() { m_read_snaps = {}; m_zero_interval = {}; - m_src_image_ctx->image_lock.get_read(); + m_src_image_ctx->image_lock.lock_shared(); bool hide_parent = (m_src_image_ctx->parent != nullptr); - m_src_image_ctx->image_lock.put_read(); + m_src_image_ctx->image_lock.unlock_shared(); librados::snap_t src_copy_point_snap_id = m_snap_map.rbegin()->first; bool prev_exists = hide_parent; @@ -712,7 +713,7 @@ void ObjectCopyRequest::compute_read_ops() { template void ObjectCopyRequest::compute_read_from_parent_ops( io::Extents *parent_image_extents) { - assert(m_src_image_ctx->image_lock.is_locked()); + assert(ceph_mutex_is_locked(m_src_image_ctx->image_lock)); m_read_ops = {}; m_zero_interval = {}; @@ -845,9 +846,9 @@ void ObjectCopyRequest::compute_zero_ops() { bool fast_diff = m_dst_image_ctx->test_features(RBD_FEATURE_FAST_DIFF); uint64_t prev_end_size = 0; - m_src_image_ctx->image_lock.get_read(); + m_src_image_ctx->image_lock.lock_shared(); bool hide_parent = (m_src_image_ctx->parent != nullptr); - 
m_src_image_ctx->image_lock.put_read(); + m_src_image_ctx->image_lock.unlock_shared(); for (auto &it : m_dst_zero_interval) { auto src_snap_seq = it.first; @@ -867,7 +868,7 @@ void ObjectCopyRequest::compute_zero_ops() { } if (hide_parent) { - RWLock::RLocker image_locker(m_dst_image_ctx->image_lock); + std::shared_lock image_locker{m_dst_image_ctx->image_lock}; uint64_t parent_overlap = 0; int r = m_dst_image_ctx->get_parent_overlap(dst_snap_seq, &parent_overlap); @@ -966,7 +967,7 @@ void ObjectCopyRequest::finish(int r) { template void ObjectCopyRequest::compute_dst_object_may_exist() { - RWLock::RLocker image_locker(m_dst_image_ctx->image_lock); + std::shared_lock image_locker{m_dst_image_ctx->image_lock}; auto snap_ids = m_dst_image_ctx->snaps; snap_ids.push_back(CEPH_NOSNAP); diff --git a/src/librbd/deep_copy/ObjectCopyRequest.h b/src/librbd/deep_copy/ObjectCopyRequest.h index 6df98e695ad0..a1dccf5d770a 100644 --- a/src/librbd/deep_copy/ObjectCopyRequest.h +++ b/src/librbd/deep_copy/ObjectCopyRequest.h @@ -181,7 +181,7 @@ private: void send_update_object_map(); void handle_update_object_map(int r); - Context *start_lock_op(RWLock &owner_lock, int* r); + Context *start_lock_op(ceph::shared_mutex &owner_lock, int* r); uint64_t src_to_dst_object_offset(uint64_t objectno, uint64_t offset); diff --git a/src/librbd/deep_copy/SetHeadRequest.cc b/src/librbd/deep_copy/SetHeadRequest.cc index 654ac33071dc..a6c43dfedd6f 100644 --- a/src/librbd/deep_copy/SetHeadRequest.cc +++ b/src/librbd/deep_copy/SetHeadRequest.cc @@ -39,13 +39,13 @@ void SetHeadRequest::send() { template void SetHeadRequest::send_set_size() { - m_image_ctx->image_lock.get_read(); + m_image_ctx->image_lock.lock_shared(); if (m_image_ctx->size == m_size) { - m_image_ctx->image_lock.put_read(); + m_image_ctx->image_lock.unlock_shared(); send_detach_parent(); return; } - m_image_ctx->image_lock.put_read(); + m_image_ctx->image_lock.unlock_shared(); ldout(m_cct, 20) << dendl; @@ -87,7 +87,7 @@ void 
SetHeadRequest::handle_set_size(int r) { { // adjust in-memory image size now that it's updated on disk - RWLock::WLocker image_locker(m_image_ctx->image_lock); + std::unique_lock image_locker{m_image_ctx->image_lock}; if (m_image_ctx->size > m_size) { if (m_image_ctx->parent_md.spec.pool_id != -1 && m_image_ctx->parent_md.overlap > m_size) { @@ -102,15 +102,15 @@ void SetHeadRequest::handle_set_size(int r) { template void SetHeadRequest::send_detach_parent() { - m_image_ctx->image_lock.get_read(); + m_image_ctx->image_lock.lock_shared(); if (m_image_ctx->parent_md.spec.pool_id == -1 || (m_image_ctx->parent_md.spec == m_parent_spec && m_image_ctx->parent_md.overlap == m_parent_overlap)) { - m_image_ctx->image_lock.put_read(); + m_image_ctx->image_lock.unlock_shared(); send_attach_parent(); return; } - m_image_ctx->image_lock.put_read(); + m_image_ctx->image_lock.unlock_shared(); ldout(m_cct, 20) << dendl; @@ -142,7 +142,7 @@ void SetHeadRequest::handle_detach_parent(int r) { { // adjust in-memory parent now that it's updated on disk - RWLock::WLocker image_locker(m_image_ctx->image_lock); + std::unique_lock image_locker{m_image_ctx->image_lock}; m_image_ctx->parent_md.spec = {}; m_image_ctx->parent_md.overlap = 0; } @@ -152,14 +152,14 @@ void SetHeadRequest::handle_detach_parent(int r) { template void SetHeadRequest::send_attach_parent() { - m_image_ctx->image_lock.get_read(); + m_image_ctx->image_lock.lock_shared(); if (m_image_ctx->parent_md.spec == m_parent_spec && m_image_ctx->parent_md.overlap == m_parent_overlap) { - m_image_ctx->image_lock.put_read(); + m_image_ctx->image_lock.unlock_shared(); finish(0); return; } - m_image_ctx->image_lock.put_read(); + m_image_ctx->image_lock.unlock_shared(); ldout(m_cct, 20) << dendl; @@ -192,7 +192,7 @@ void SetHeadRequest::handle_attach_parent(int r) { { // adjust in-memory parent now that it's updated on disk - RWLock::WLocker image_locker(m_image_ctx->image_lock); + std::unique_lock 
image_locker{m_image_ctx->image_lock}; m_image_ctx->parent_md.spec = m_parent_spec; m_image_ctx->parent_md.overlap = m_parent_overlap; } @@ -202,7 +202,7 @@ void SetHeadRequest::handle_attach_parent(int r) { template Context *SetHeadRequest::start_lock_op(int* r) { - RWLock::RLocker owner_locker(m_image_ctx->owner_lock); + std::shared_lock owner_locker{m_image_ctx->owner_lock}; if (m_image_ctx->exclusive_lock == nullptr) { return new FunctionContext([](int r) {}); } diff --git a/src/librbd/deep_copy/SnapshotCopyRequest.cc b/src/librbd/deep_copy/SnapshotCopyRequest.cc index de57ddb144b1..e53386b7148e 100644 --- a/src/librbd/deep_copy/SnapshotCopyRequest.cc +++ b/src/librbd/deep_copy/SnapshotCopyRequest.cc @@ -53,7 +53,7 @@ SnapshotCopyRequest::SnapshotCopyRequest(I *src_image_ctx, m_dst_image_ctx(dst_image_ctx), m_snap_id_end(snap_id_end), m_flatten(flatten), m_work_queue(work_queue), m_snap_seqs_result(snap_seqs), m_snap_seqs(*snap_seqs), m_on_finish(on_finish), m_cct(dst_image_ctx->cct), - m_lock(unique_lock_name("SnapshotCopyRequest::m_lock", this)) { + m_lock(ceph::make_mutex(unique_lock_name("SnapshotCopyRequest::m_lock", this))) { // snap ids ordered from oldest to newest m_src_snap_ids.insert(src_image_ctx->snaps.begin(), src_image_ctx->snaps.end()); @@ -87,7 +87,7 @@ void SnapshotCopyRequest::send() { template void SnapshotCopyRequest::cancel() { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ldout(m_cct, 20) << dendl; m_canceled = true; @@ -104,18 +104,18 @@ void SnapshotCopyRequest::send_snap_unprotect() { for (; snap_id_it != m_dst_snap_ids.end(); ++snap_id_it) { librados::snap_t dst_snap_id = *snap_id_it; - m_dst_image_ctx->image_lock.get_read(); + m_dst_image_ctx->image_lock.lock_shared(); bool dst_unprotected; int r = m_dst_image_ctx->is_snap_unprotected(dst_snap_id, &dst_unprotected); if (r < 0) { lderr(m_cct) << "failed to retrieve destination snap unprotect status: " << cpp_strerror(r) << dendl; - 
m_dst_image_ctx->image_lock.put_read(); + m_dst_image_ctx->image_lock.unlock_shared(); finish(r); return; } - m_dst_image_ctx->image_lock.put_read(); + m_dst_image_ctx->image_lock.unlock_shared(); if (dst_unprotected) { // snap is already unprotected -- check next snap @@ -131,7 +131,7 @@ void SnapshotCopyRequest::send_snap_unprotect() { }); if (snap_seq_it != m_snap_seqs.end()) { - m_src_image_ctx->image_lock.get_read(); + m_src_image_ctx->image_lock.lock_shared(); bool src_unprotected; r = m_src_image_ctx->is_snap_unprotected(snap_seq_it->first, &src_unprotected); @@ -145,11 +145,11 @@ void SnapshotCopyRequest::send_snap_unprotect() { if (r < 0) { lderr(m_cct) << "failed to retrieve source snap unprotect status: " << cpp_strerror(r) << dendl; - m_src_image_ctx->image_lock.put_read(); + m_src_image_ctx->image_lock.unlock_shared(); finish(r); return; } - m_src_image_ctx->image_lock.put_read(); + m_src_image_ctx->image_lock.unlock_shared(); if (src_unprotected) { // source is unprotected -- unprotect destination snap @@ -186,7 +186,7 @@ void SnapshotCopyRequest::send_snap_unprotect() { handle_snap_unprotect(r); finish_op_ctx->complete(0); }); - RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock); + std::shared_lock owner_locker{m_dst_image_ctx->owner_lock}; m_dst_image_ctx->operations->execute_snap_unprotect( cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx); } @@ -204,7 +204,7 @@ void SnapshotCopyRequest::handle_snap_unprotect(int r) { { // avoid the need to refresh to delete the newly unprotected snapshot - RWLock::RLocker image_locker(m_dst_image_ctx->image_lock); + std::shared_lock image_locker{m_dst_image_ctx->image_lock}; auto snap_info_it = m_dst_image_ctx->snap_info.find(m_prev_snap_id); if (snap_info_it != m_dst_image_ctx->snap_info.end()) { snap_info_it->second.protection_status = @@ -230,9 +230,9 @@ void SnapshotCopyRequest::send_snap_remove() { librados::snap_t dst_snap_id = *snap_id_it; cls::rbd::SnapshotNamespace snap_namespace; - 
m_dst_image_ctx->image_lock.get_read(); + m_dst_image_ctx->image_lock.lock_shared(); int r = m_dst_image_ctx->get_snap_namespace(dst_snap_id, &snap_namespace); - m_dst_image_ctx->image_lock.put_read(); + m_dst_image_ctx->image_lock.unlock_shared(); if (r < 0) { lderr(m_cct) << "failed to retrieve destination snap namespace: " << m_snap_name << dendl; @@ -283,7 +283,7 @@ void SnapshotCopyRequest::send_snap_remove() { handle_snap_remove(r); finish_op_ctx->complete(0); }); - RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock); + std::shared_lock owner_locker{m_dst_image_ctx->owner_lock}; m_dst_image_ctx->operations->execute_snap_remove( cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx); } @@ -316,9 +316,9 @@ void SnapshotCopyRequest::send_snap_create() { librados::snap_t src_snap_id = *snap_id_it; cls::rbd::SnapshotNamespace snap_namespace; - m_src_image_ctx->image_lock.get_read(); + m_src_image_ctx->image_lock.lock_shared(); int r = m_src_image_ctx->get_snap_namespace(src_snap_id, &snap_namespace); - m_src_image_ctx->image_lock.put_read(); + m_src_image_ctx->image_lock.unlock_shared(); if (r < 0) { lderr(m_cct) << "failed to retrieve source snap namespace: " << m_snap_name << dendl; @@ -343,10 +343,10 @@ void SnapshotCopyRequest::send_snap_create() { m_prev_snap_id = *snap_id_it; m_snap_name = get_snapshot_name(m_src_image_ctx, m_prev_snap_id); - m_src_image_ctx->image_lock.get_read(); + m_src_image_ctx->image_lock.lock_shared(); auto snap_info_it = m_src_image_ctx->snap_info.find(m_prev_snap_id); if (snap_info_it == m_src_image_ctx->snap_info.end()) { - m_src_image_ctx->image_lock.put_read(); + m_src_image_ctx->image_lock.unlock_shared(); lderr(m_cct) << "failed to retrieve source snap info: " << m_snap_name << dendl; finish(-ENOENT); @@ -361,7 +361,7 @@ void SnapshotCopyRequest::send_snap_create() { parent_spec = m_dst_parent_spec; parent_overlap = snap_info_it->second.parent.overlap; } - m_src_image_ctx->image_lock.put_read(); + 
m_src_image_ctx->image_lock.unlock_shared(); ldout(m_cct, 20) << "snap_name=" << m_snap_name << ", " << "snap_id=" << m_prev_snap_id << ", " @@ -428,18 +428,18 @@ void SnapshotCopyRequest::send_snap_protect() { for (; snap_id_it != m_src_snap_ids.end(); ++snap_id_it) { librados::snap_t src_snap_id = *snap_id_it; - m_src_image_ctx->image_lock.get_read(); + m_src_image_ctx->image_lock.lock_shared(); bool src_protected; int r = m_src_image_ctx->is_snap_protected(src_snap_id, &src_protected); if (r < 0) { lderr(m_cct) << "failed to retrieve source snap protect status: " << cpp_strerror(r) << dendl; - m_src_image_ctx->image_lock.put_read(); + m_src_image_ctx->image_lock.unlock_shared(); finish(r); return; } - m_src_image_ctx->image_lock.put_read(); + m_src_image_ctx->image_lock.unlock_shared(); if (!src_protected) { // snap is not protected -- check next snap @@ -450,17 +450,17 @@ void SnapshotCopyRequest::send_snap_protect() { auto snap_seq_it = m_snap_seqs.find(src_snap_id); ceph_assert(snap_seq_it != m_snap_seqs.end()); - m_dst_image_ctx->image_lock.get_read(); + m_dst_image_ctx->image_lock.lock_shared(); bool dst_protected; r = m_dst_image_ctx->is_snap_protected(snap_seq_it->second, &dst_protected); if (r < 0) { lderr(m_cct) << "failed to retrieve destination snap protect status: " << cpp_strerror(r) << dendl; - m_dst_image_ctx->image_lock.put_read(); + m_dst_image_ctx->image_lock.unlock_shared(); finish(r); return; } - m_dst_image_ctx->image_lock.put_read(); + m_dst_image_ctx->image_lock.unlock_shared(); if (!dst_protected) { break; @@ -492,7 +492,7 @@ void SnapshotCopyRequest::send_snap_protect() { handle_snap_protect(r); finish_op_ctx->complete(0); }); - RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock); + std::shared_lock owner_locker{m_dst_image_ctx->owner_lock}; m_dst_image_ctx->operations->execute_snap_protect( cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx); } @@ -527,7 +527,7 @@ void SnapshotCopyRequest::send_set_head() { 
cls::rbd::ParentImageSpec parent_spec; uint64_t parent_overlap = 0; { - RWLock::RLocker src_locker(m_src_image_ctx->image_lock); + std::shared_lock src_locker{m_src_image_ctx->image_lock}; size = m_src_image_ctx->size; if (!m_flatten) { parent_spec = m_src_image_ctx->parent_md.spec; @@ -565,8 +565,8 @@ void SnapshotCopyRequest::send_resize_object_map() { if (m_snap_id_end == CEPH_NOSNAP && m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP)) { - RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock); - RWLock::RLocker image_locker(m_dst_image_ctx->image_lock); + std::shared_lock owner_locker{m_dst_image_ctx->owner_lock}; + std::shared_lock image_locker{m_dst_image_ctx->image_lock}; if (m_dst_image_ctx->object_map != nullptr && Striper::get_num_objects(m_dst_image_ctx->layout, @@ -611,7 +611,7 @@ void SnapshotCopyRequest::handle_resize_object_map(int r) { template bool SnapshotCopyRequest::handle_cancellation() { { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (!m_canceled) { return false; } @@ -631,8 +631,8 @@ void SnapshotCopyRequest::error(int r) { template int SnapshotCopyRequest::validate_parent(I *image_ctx, cls::rbd::ParentImageSpec *spec) { - RWLock::RLocker owner_locker(image_ctx->owner_lock); - RWLock::RLocker image_locker(image_ctx->image_lock); + std::shared_lock owner_locker{image_ctx->owner_lock}; + std::shared_lock image_locker{image_ctx->image_lock}; // ensure source image's parent specs are still consistent *spec = image_ctx->parent_md.spec; @@ -654,13 +654,13 @@ int SnapshotCopyRequest::validate_parent(I *image_ctx, template Context *SnapshotCopyRequest::start_lock_op(int* r) { - RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock); + std::shared_lock owner_locker{m_dst_image_ctx->owner_lock}; return start_lock_op(m_dst_image_ctx->owner_lock, r); } template -Context *SnapshotCopyRequest::start_lock_op(RWLock &owner_lock, int* r) { - ceph_assert(m_dst_image_ctx->owner_lock.is_locked()); +Context 
*SnapshotCopyRequest::start_lock_op(ceph::shared_mutex &owner_lock, int* r) { + ceph_assert(ceph_mutex_is_locked(m_dst_image_ctx->owner_lock)); if (m_dst_image_ctx->exclusive_lock == nullptr) { return new FunctionContext([](int r) {}); } diff --git a/src/librbd/deep_copy/SnapshotCopyRequest.h b/src/librbd/deep_copy/SnapshotCopyRequest.h index 1e4f3badba0c..ede5a76758fe 100644 --- a/src/librbd/deep_copy/SnapshotCopyRequest.h +++ b/src/librbd/deep_copy/SnapshotCopyRequest.h @@ -99,7 +99,7 @@ private: cls::rbd::ParentImageSpec m_dst_parent_spec; - Mutex m_lock; + ceph::mutex m_lock; bool m_canceled = false; void send_snap_unprotect(); @@ -127,7 +127,7 @@ private: int validate_parent(ImageCtxT *image_ctx, cls::rbd::ParentImageSpec *spec); Context *start_lock_op(int* r); - Context *start_lock_op(RWLock &owner_locki, int* r); + Context *start_lock_op(ceph::shared_mutex &owner_lock, int* r); void finish(int r); }; diff --git a/src/librbd/deep_copy/SnapshotCreateRequest.cc b/src/librbd/deep_copy/SnapshotCreateRequest.cc index 2d5fc58d8d60..5f4ae5121ddf 100644 --- a/src/librbd/deep_copy/SnapshotCreateRequest.cc +++ b/src/librbd/deep_copy/SnapshotCreateRequest.cc @@ -80,7 +80,7 @@ void SnapshotCreateRequest::send_create_snap() { handle_create_snap(r); finish_op_ctx->complete(0); }); - RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock); + std::shared_lock owner_locker{m_dst_image_ctx->owner_lock}; m_dst_image_ctx->operations->execute_snap_create(m_snap_namespace, m_snap_name.c_str(), ctx, @@ -108,17 +108,17 @@ void SnapshotCreateRequest::send_create_object_map() { return; } - m_dst_image_ctx->image_lock.get_read(); + m_dst_image_ctx->image_lock.lock_shared(); auto snap_it = m_dst_image_ctx->snap_ids.find( {cls::rbd::UserSnapshotNamespace(), m_snap_name}); if (snap_it == m_dst_image_ctx->snap_ids.end()) { lderr(m_cct) << "failed to locate snap: " << m_snap_name << dendl; - m_dst_image_ctx->image_lock.put_read(); + m_dst_image_ctx->image_lock.unlock_shared();
finish(-ENOENT); return; } librados::snap_t local_snap_id = snap_it->second; - m_dst_image_ctx->image_lock.put_read(); + m_dst_image_ctx->image_lock.unlock_shared(); std::string object_map_oid(librbd::ObjectMap<>::object_map_name( m_dst_image_ctx->id, local_snap_id)); @@ -166,7 +166,7 @@ void SnapshotCreateRequest::handle_create_object_map(int r) { template Context *SnapshotCreateRequest::start_lock_op(int* r) { - RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock); + std::shared_lock owner_locker{m_dst_image_ctx->owner_lock}; if (m_dst_image_ctx->exclusive_lock == nullptr) { return new FunctionContext([](int r) {}); } diff --git a/src/librbd/exclusive_lock/AutomaticPolicy.cc b/src/librbd/exclusive_lock/AutomaticPolicy.cc index 4d5f48b1527d..bfaddc1b2ba3 100644 --- a/src/librbd/exclusive_lock/AutomaticPolicy.cc +++ b/src/librbd/exclusive_lock/AutomaticPolicy.cc @@ -13,7 +13,7 @@ namespace librbd { namespace exclusive_lock { int AutomaticPolicy::lock_requested(bool force) { - ceph_assert(m_image_ctx->owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock)); ceph_assert(m_image_ctx->exclusive_lock != nullptr); ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force diff --git a/src/librbd/exclusive_lock/PostAcquireRequest.cc b/src/librbd/exclusive_lock/PostAcquireRequest.cc index 3d20d4b2aa02..7b41fab8e36f 100644 --- a/src/librbd/exclusive_lock/PostAcquireRequest.cc +++ b/src/librbd/exclusive_lock/PostAcquireRequest.cc @@ -108,7 +108,7 @@ void PostAcquireRequest::send_open_journal() { bool journal_enabled; { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; journal_enabled = (m_image_ctx.test_features(RBD_FEATURE_JOURNALING, m_image_ctx.image_lock) && !m_image_ctx.get_journal_policy()->journal_disabled()); @@ -153,7 +153,7 @@ void PostAcquireRequest::send_allocate_journal_tag() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << dendl; - 
RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; using klass = PostAcquireRequest; Context *ctx = create_context_callback< klass, &klass::handle_allocate_journal_tag>(this); @@ -272,7 +272,7 @@ void PostAcquireRequest::handle_close_object_map(int r) { template void PostAcquireRequest::apply() { { - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::unique_lock image_locker{m_image_ctx.image_lock}; ceph_assert(m_image_ctx.object_map == nullptr); m_image_ctx.object_map = m_object_map; @@ -286,7 +286,7 @@ void PostAcquireRequest::apply() { template void PostAcquireRequest::revert() { - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::unique_lock image_locker{m_image_ctx.image_lock}; m_image_ctx.object_map = nullptr; m_image_ctx.journal = nullptr; diff --git a/src/librbd/exclusive_lock/PreReleaseRequest.cc b/src/librbd/exclusive_lock/PreReleaseRequest.cc index 346c4f39e0d3..6632550f7e29 100644 --- a/src/librbd/exclusive_lock/PreReleaseRequest.cc +++ b/src/librbd/exclusive_lock/PreReleaseRequest.cc @@ -109,7 +109,7 @@ void PreReleaseRequest::send_block_writes() { klass, &klass::handle_block_writes>(this); { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; // setting the lock as required will automatically cause the IO // queue to re-request the lock if any IO is queued if (m_image_ctx.clone_copy_on_read || @@ -165,7 +165,7 @@ void PreReleaseRequest::send_invalidate_cache() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << dendl; - RWLock::RLocker owner_lock(m_image_ctx.owner_lock); + std::shared_lock owner_lock{m_image_ctx.owner_lock}; Context *ctx = create_context_callback< PreReleaseRequest, &PreReleaseRequest::handle_invalidate_cache>(this); @@ -212,7 +212,7 @@ void PreReleaseRequest::handle_flush_notifies(int r) { template void PreReleaseRequest::send_close_journal() { { - RWLock::WLocker 
image_locker(m_image_ctx.image_lock); + std::unique_lock image_locker{m_image_ctx.image_lock}; std::swap(m_journal, m_image_ctx.journal); } @@ -248,7 +248,7 @@ void PreReleaseRequest::handle_close_journal(int r) { template void PreReleaseRequest::send_close_object_map() { { - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::unique_lock image_locker{m_image_ctx.image_lock}; std::swap(m_object_map, m_image_ctx.object_map); } diff --git a/src/librbd/exclusive_lock/StandardPolicy.cc b/src/librbd/exclusive_lock/StandardPolicy.cc index 6bdb313b3696..227c40815f95 100644 --- a/src/librbd/exclusive_lock/StandardPolicy.cc +++ b/src/librbd/exclusive_lock/StandardPolicy.cc @@ -13,7 +13,7 @@ namespace librbd { namespace exclusive_lock { int StandardPolicy::lock_requested(bool force) { - ceph_assert(m_image_ctx->owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock)); ceph_assert(m_image_ctx->exclusive_lock != nullptr); ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force diff --git a/src/librbd/image/AttachChildRequest.cc b/src/librbd/image/AttachChildRequest.cc index ac709aa0f3e7..d3920608d00c 100644 --- a/src/librbd/image/AttachChildRequest.cc +++ b/src/librbd/image/AttachChildRequest.cc @@ -93,7 +93,7 @@ void AttachChildRequest::handle_v1_refresh(int r) { bool snap_protected = false; if (r == 0) { - RWLock::RLocker image_locker(m_parent_image_ctx->image_lock); + std::shared_lock image_locker{m_parent_image_ctx->image_lock}; r = m_parent_image_ctx->is_snap_protected(m_parent_snap_id, &snap_protected); } diff --git a/src/librbd/image/CloneRequest.cc b/src/librbd/image/CloneRequest.cc index d430680aed87..d5f2acd5fb13 100644 --- a/src/librbd/image/CloneRequest.cc +++ b/src/librbd/image/CloneRequest.cc @@ -189,13 +189,13 @@ void CloneRequest::validate_parent() { return; } - m_parent_image_ctx->image_lock.get_read(); + m_parent_image_ctx->image_lock.lock_shared(); uint64_t p_features = m_parent_image_ctx->features; 
m_size = m_parent_image_ctx->get_image_size(m_parent_image_ctx->snap_id); bool snap_protected; int r = m_parent_image_ctx->is_snap_protected(m_parent_image_ctx->snap_id, &snap_protected); - m_parent_image_ctx->image_lock.put_read(); + m_parent_image_ctx->image_lock.unlock_shared(); if ((p_features & RBD_FEATURE_LAYERING) != RBD_FEATURE_LAYERING) { lderr(m_cct) << "parent image must support layering" << dendl; @@ -276,7 +276,7 @@ void CloneRequest::create_child() { Context *ctx = create_context_callback< klass, &klass::handle_create_child>(this); - RWLock::RLocker image_locker(m_parent_image_ctx->image_lock); + std::shared_lock image_locker{m_parent_image_ctx->image_lock}; CreateRequest *req = CreateRequest::create( m_config, m_ioctx, m_name, m_id, m_size, m_opts, m_non_primary_global_image_id, m_primary_mirror_uuid, true, diff --git a/src/librbd/image/CloseRequest.cc b/src/librbd/image/CloseRequest.cc index 33fa3bb6bb1f..a92c96c4190c 100644 --- a/src/librbd/image/CloseRequest.cc +++ b/src/librbd/image/CloseRequest.cc @@ -89,7 +89,7 @@ void CloseRequest::send_shut_down_io_queue() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; - RWLock::RLocker owner_locker(m_image_ctx->owner_lock); + std::shared_lock owner_locker{m_image_ctx->owner_lock}; m_image_ctx->io_work_queue->shut_down(create_context_callback< CloseRequest, &CloseRequest::handle_shut_down_io_queue>(this)); } @@ -105,11 +105,11 @@ void CloseRequest::handle_shut_down_io_queue(int r) { template void CloseRequest::send_shut_down_exclusive_lock() { { - RWLock::WLocker owner_locker(m_image_ctx->owner_lock); + std::unique_lock owner_locker{m_image_ctx->owner_lock}; m_exclusive_lock = m_image_ctx->exclusive_lock; // if reading a snapshot -- possible object map is open - RWLock::WLocker image_locker(m_image_ctx->image_lock); + std::unique_lock image_locker{m_image_ctx->image_lock}; if (m_exclusive_lock == nullptr) { delete m_image_ctx->object_map; m_image_ctx->object_map = 
nullptr; @@ -136,11 +136,11 @@ void CloseRequest::handle_shut_down_exclusive_lock(int r) { ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; { - RWLock::RLocker owner_locker(m_image_ctx->owner_lock); + std::shared_lock owner_locker{m_image_ctx->owner_lock}; ceph_assert(m_image_ctx->exclusive_lock == nullptr); // object map and journal closed during exclusive lock shutdown - RWLock::RLocker image_locker(m_image_ctx->image_lock); + std::shared_lock image_locker{m_image_ctx->image_lock}; ceph_assert(m_image_ctx->journal == nullptr); ceph_assert(m_image_ctx->object_map == nullptr); } @@ -162,7 +162,7 @@ void CloseRequest::send_flush() { CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; - RWLock::RLocker owner_locker(m_image_ctx->owner_lock); + std::shared_lock owner_locker{m_image_ctx->owner_lock}; auto ctx = create_context_callback< CloseRequest, &CloseRequest::handle_flush>(this); auto aio_comp = io::AioCompletion::create_and_start(ctx, m_image_ctx, diff --git a/src/librbd/image/DetachChildRequest.cc b/src/librbd/image/DetachChildRequest.cc index 242f12165fc7..584a359017c7 100644 --- a/src/librbd/image/DetachChildRequest.cc +++ b/src/librbd/image/DetachChildRequest.cc @@ -32,7 +32,7 @@ DetachChildRequest::~DetachChildRequest() { template void DetachChildRequest::send() { { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; // use oldest snapshot or HEAD for parent spec if (!m_image_ctx.snap_info.empty()) { diff --git a/src/librbd/image/ListWatchersRequest.cc b/src/librbd/image/ListWatchersRequest.cc index ad3a20caf0ba..594c14445ee9 100644 --- a/src/librbd/image/ListWatchersRequest.cc +++ b/src/librbd/image/ListWatchersRequest.cc @@ -118,7 +118,7 @@ void ListWatchersRequest::finish(int r) { m_watchers->clear(); if (m_object_watchers.size() > 0) { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock 
owner_locker{m_image_ctx.owner_lock}; uint64_t watch_handle = m_image_ctx.image_watcher != nullptr ? m_image_ctx.image_watcher->get_watch_handle() : 0; diff --git a/src/librbd/image/OpenRequest.cc b/src/librbd/image/OpenRequest.cc index fb1ada777def..69269f59f461 100644 --- a/src/librbd/image/OpenRequest.cc +++ b/src/librbd/image/OpenRequest.cc @@ -638,7 +638,7 @@ Context *OpenRequest::send_set_snap(int *result) { uint64_t snap_id = CEPH_NOSNAP; std::swap(m_image_ctx->open_snap_id, snap_id); if (snap_id == CEPH_NOSNAP) { - RWLock::RLocker image_locker(m_image_ctx->image_lock); + std::shared_lock image_locker{m_image_ctx->image_lock}; snap_id = m_image_ctx->get_snap_id(m_image_ctx->snap_namespace, m_image_ctx->snap_name); } diff --git a/src/librbd/image/PreRemoveRequest.cc b/src/librbd/image/PreRemoveRequest.cc index 5c664c217279..ef66a6ad7748 100644 --- a/src/librbd/image/PreRemoveRequest.cc +++ b/src/librbd/image/PreRemoveRequest.cc @@ -52,7 +52,7 @@ void PreRemoveRequest::send() { template void PreRemoveRequest::acquire_exclusive_lock() { - RWLock::RLocker owner_lock(m_image_ctx->owner_lock); + std::shared_lock owner_lock{m_image_ctx->owner_lock}; if (m_image_ctx->exclusive_lock == nullptr) { validate_image_removal(); return; @@ -64,7 +64,7 @@ void PreRemoveRequest::acquire_exclusive_lock() { // do not attempt to open the journal when removing the image in case // it's corrupt if (m_image_ctx->test_features(RBD_FEATURE_JOURNALING)) { - RWLock::WLocker image_locker(m_image_ctx->image_lock); + std::unique_lock image_locker{m_image_ctx->image_lock}; m_image_ctx->set_journal_policy(new journal::DisabledPolicy()); } @@ -136,19 +136,19 @@ void PreRemoveRequest::check_image_snaps() { auto cct = m_image_ctx->cct; ldout(cct, 5) << dendl; - m_image_ctx->image_lock.get_read(); + m_image_ctx->image_lock.lock_shared(); for (auto& snap_info : m_image_ctx->snap_info) { if (auto_delete_snapshot(snap_info.second)) { m_snap_infos.insert(snap_info); } else { - 
m_image_ctx->image_lock.put_read(); + m_image_ctx->image_lock.unlock_shared(); ldout(cct, 5) << "image has snapshots - not removing" << dendl; finish(-ENOTEMPTY); return; } } - m_image_ctx->image_lock.put_read(); + m_image_ctx->image_lock.unlock_shared(); list_image_watchers(); } @@ -257,7 +257,7 @@ void PreRemoveRequest::remove_snapshot() { ldout(cct, 20) << "snap_id=" << snap_id << ", " << "snap_name=" << snap_info.name << dendl; - RWLock::RLocker owner_lock(m_image_ctx->owner_lock); + std::shared_lock owner_lock{m_image_ctx->owner_lock}; auto ctx = create_context_callback< PreRemoveRequest, &PreRemoveRequest::handle_remove_snapshot>(this); auto req = librbd::operation::SnapshotRemoveRequest::create( diff --git a/src/librbd/image/RefreshParentRequest.cc b/src/librbd/image/RefreshParentRequest.cc index 02b99b902025..88084d13b773 100644 --- a/src/librbd/image/RefreshParentRequest.cc +++ b/src/librbd/image/RefreshParentRequest.cc @@ -36,7 +36,7 @@ template bool RefreshParentRequest::is_refresh_required( I &child_image_ctx, const ParentImageInfo &parent_md, const MigrationInfo &migration_info) { - ceph_assert(child_image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(child_image_ctx.image_lock)); return (is_open_required(child_image_ctx, parent_md, migration_info) || is_close_required(child_image_ctx, parent_md, migration_info)); } @@ -89,7 +89,7 @@ void RefreshParentRequest::send() { template void RefreshParentRequest::apply() { - ceph_assert(m_child_image_ctx.image_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_wlocked(m_child_image_ctx.image_lock)); std::swap(m_child_image_ctx.parent, m_parent_image_ctx); } diff --git a/src/librbd/image/RefreshRequest.cc b/src/librbd/image/RefreshRequest.cc index ba9be9beb8f8..d521e33bff61 100644 --- a/src/librbd/image/RefreshRequest.cc +++ b/src/librbd/image/RefreshRequest.cc @@ -336,7 +336,7 @@ void RefreshRequest::send_v2_get_mutable_metadata() { uint64_t snap_id; { - RWLock::RLocker 
image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; snap_id = m_image_ctx.snap_id; } @@ -807,7 +807,7 @@ Context *RefreshRequest::handle_v2_get_snapshots(int *result) { template void RefreshRequest::send_v2_refresh_parent() { { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; ParentImageInfo parent_md; MigrationInfo migration_info; @@ -870,7 +870,7 @@ void RefreshRequest::send_v2_init_exclusive_lock() { Context *ctx = create_context_callback< klass, &klass::handle_v2_init_exclusive_lock>(this); - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; m_exclusive_lock->init(m_features, ctx); } @@ -902,7 +902,7 @@ void RefreshRequest::send_v2_open_journal() { !m_image_ctx.exclusive_lock->is_lock_owner()); bool journal_disabled_by_policy; { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; journal_disabled_by_policy = ( !journal_disabled && m_image_ctx.get_journal_policy()->journal_disabled()); @@ -954,7 +954,7 @@ template void RefreshRequest::send_v2_block_writes() { bool disabled_journaling = false; { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; disabled_journaling = ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0 && (m_features & RBD_FEATURE_JOURNALING) == 0 && m_image_ctx.journal != nullptr); @@ -974,7 +974,7 @@ void RefreshRequest::send_v2_block_writes() { Context *ctx = create_context_callback< RefreshRequest, &RefreshRequest::handle_v2_block_writes>(this); - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; m_image_ctx.io_work_queue->block_writes(ctx); } @@ -1135,7 +1135,7 @@ Context *RefreshRequest::handle_v2_shut_down_exclusive_lock(int *result) { } { - RWLock::WLocker owner_locker(m_image_ctx.owner_lock); + 
std::unique_lock owner_locker{m_image_ctx.owner_lock}; ceph_assert(m_image_ctx.exclusive_lock == nullptr); } @@ -1230,7 +1230,7 @@ Context *RefreshRequest::send_flush_aio() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; auto ctx = create_context_callback< RefreshRequest, &RefreshRequest::handle_flush_aio>(this); auto aio_comp = io::AioCompletion::create_and_start( @@ -1280,8 +1280,7 @@ void RefreshRequest::apply() { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - RWLock::WLocker owner_locker(m_image_ctx.owner_lock); - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::scoped_lock locker{m_image_ctx.owner_lock, m_image_ctx.image_lock}; m_image_ctx.size = m_size; m_image_ctx.lockers = m_lockers; diff --git a/src/librbd/image/RemoveRequest.cc b/src/librbd/image/RemoveRequest.cc index 865df16d74e6..f1fe0d96cd7b 100644 --- a/src/librbd/image/RemoveRequest.cc +++ b/src/librbd/image/RemoveRequest.cc @@ -141,7 +141,7 @@ void RemoveRequest::trim_image() { *m_image_ctx, create_context_callback< klass, &klass::handle_trim_image>(this)); - RWLock::RLocker owner_lock(m_image_ctx->owner_lock); + std::shared_lock owner_lock{m_image_ctx->owner_lock}; auto req = librbd::operation::TrimRequest::create( *m_image_ctx, ctx, m_image_ctx->size, 0, m_prog_ctx); req->send(); diff --git a/src/librbd/image/SetFlagsRequest.cc b/src/librbd/image/SetFlagsRequest.cc index 22b009008a1b..fa00ed981dc9 100644 --- a/src/librbd/image/SetFlagsRequest.cc +++ b/src/librbd/image/SetFlagsRequest.cc @@ -36,7 +36,7 @@ void SetFlagsRequest::send_set_flags() { CephContext *cct = m_image_ctx->cct; ldout(cct, 20) << __func__ << dendl; - RWLock::WLocker image_locker(m_image_ctx->image_lock); + std::unique_lock image_locker{m_image_ctx->image_lock}; std::vector snap_ids; snap_ids.push_back(CEPH_NOSNAP); for 
(auto it : m_image_ctx->snap_info) { diff --git a/src/librbd/image/SetFlagsRequest.h b/src/librbd/image/SetFlagsRequest.h index 36905e625c9b..be67e176a691 100644 --- a/src/librbd/image/SetFlagsRequest.h +++ b/src/librbd/image/SetFlagsRequest.h @@ -5,7 +5,6 @@ #define CEPH_LIBRBD_IMAGE_SET_FLAGS_REQUEST_H #include "include/buffer.h" -#include "common/Mutex.h" #include #include diff --git a/src/librbd/image/SetSnapRequest.cc b/src/librbd/image/SetSnapRequest.cc index c8e029ff7aa0..43e7d7d67622 100644 --- a/src/librbd/image/SetSnapRequest.cc +++ b/src/librbd/image/SetSnapRequest.cc @@ -48,7 +48,7 @@ void SetSnapRequest::send() { template void SetSnapRequest::send_init_exclusive_lock() { { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; if (m_image_ctx.exclusive_lock != nullptr) { ceph_assert(m_image_ctx.snap_id == CEPH_NOSNAP); send_complete(); @@ -74,7 +74,7 @@ void SetSnapRequest::send_init_exclusive_lock() { Context *ctx = create_context_callback< klass, &klass::handle_init_exclusive_lock>(this); - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; m_exclusive_lock->init(m_image_ctx.features, ctx); } @@ -103,7 +103,7 @@ void SetSnapRequest::send_block_writes() { Context *ctx = create_context_callback< klass, &klass::handle_block_writes>(this); - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; m_image_ctx.io_work_queue->block_writes(ctx); } @@ -120,7 +120,7 @@ Context *SetSnapRequest::handle_block_writes(int *result) { } { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; auto it = m_image_ctx.snap_info.find(m_snap_id); if (it == m_image_ctx.snap_info.end()) { ldout(cct, 5) << "failed to locate snapshot '" << m_snap_id << "'" @@ -138,7 +138,7 @@ Context *SetSnapRequest::handle_block_writes(int *result) { template 
Context *SetSnapRequest::send_shut_down_exclusive_lock(int *result) { { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; m_exclusive_lock = m_image_ctx.exclusive_lock; } @@ -178,7 +178,7 @@ Context *SetSnapRequest::send_refresh_parent(int *result) { ParentImageInfo parent_md; bool refresh_parent; { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; const auto parent_info = m_image_ctx.get_parent_info(m_snap_id); if (parent_info == nullptr) { @@ -323,8 +323,7 @@ int SetSnapRequest::apply() { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << __func__ << dendl; - RWLock::WLocker owner_locker(m_image_ctx.owner_lock); - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::scoped_lock locker{m_image_ctx.owner_lock, m_image_ctx.image_lock}; if (m_snap_id != CEPH_NOSNAP) { ceph_assert(m_image_ctx.exclusive_lock == nullptr); int r = m_image_ctx.snap_set(m_snap_id); diff --git a/src/librbd/image_watcher/NotifyLockOwner.cc b/src/librbd/image_watcher/NotifyLockOwner.cc index ead5f214c223..1d34106f9d93 100644 --- a/src/librbd/image_watcher/NotifyLockOwner.cc +++ b/src/librbd/image_watcher/NotifyLockOwner.cc @@ -36,7 +36,7 @@ void NotifyLockOwner::send_notify() { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << dendl; - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); m_notifier.notify(m_bl, &m_notify_response, create_context_callback< NotifyLockOwner, &NotifyLockOwner::handle_notify>(this)); } diff --git a/src/librbd/internal.cc b/src/librbd/internal.cc index 1cefd073b528..9660a0ef4f0d 100644 --- a/src/librbd/internal.cc +++ b/src/librbd/internal.cc @@ -171,9 +171,10 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { void image_info(ImageCtx *ictx, image_info_t& info, size_t infosize) { int obj_order = ictx->order; - ictx->image_lock.get_read(); - info.size = 
ictx->get_image_size(ictx->snap_id); - ictx->image_lock.put_read(); + { + std::shared_lock locker{ictx->image_lock}; + info.size = ictx->get_image_size(ictx->snap_id); + } info.obj_size = 1ULL << obj_order; info.num_objs = Striper::get_num_objects(ictx->layout, info.size); info.order = obj_order; @@ -198,15 +199,15 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { void trim_image(ImageCtx *ictx, uint64_t newsize, ProgressContext& prog_ctx) { - ceph_assert(ictx->owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(ictx->owner_lock)); ceph_assert(ictx->exclusive_lock == nullptr || ictx->exclusive_lock->is_lock_owner()); C_SaferCond ctx; - ictx->image_lock.get_read(); + ictx->image_lock.lock_shared(); operation::TrimRequest<> *req = operation::TrimRequest<>::create( *ictx, &ctx, ictx->size, newsize, prog_ctx); - ictx->image_lock.put_read(); + ictx->image_lock.unlock_shared(); req->send(); int r = ctx.wait(); @@ -520,7 +521,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { return r; } - RWLock::RLocker l(ictx->image_lock); + std::shared_lock l{ictx->image_lock}; snap_t snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace(), snap_name); @@ -605,7 +606,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { int r = ictx->state->refresh_if_required(); if (r < 0) return r; - RWLock::RLocker l(ictx->image_lock); + std::shared_lock l{ictx->image_lock}; snap_t snap_id = ictx->get_snap_id(*snap_namespace, snap_name); if (snap_id == CEPH_NOSNAP) return -ENOENT; @@ -622,7 +623,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { if (r < 0) return r; - RWLock::RLocker l(ictx->image_lock); + std::shared_lock l{ictx->image_lock}; snap_t snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace(), snap_name); if (snap_id == CEPH_NOSNAP) return -ENOENT; @@ -953,7 +954,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { int r = ictx->state->refresh_if_required(); if (r < 0) return r; - RWLock::RLocker l2(ictx->image_lock); + std::shared_lock 
l2{ictx->image_lock}; *size = ictx->get_image_size(ictx->snap_id); return 0; } @@ -963,7 +964,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { int r = ictx->state->refresh_if_required(); if (r < 0) return r; - RWLock::RLocker l(ictx->image_lock); + std::shared_lock l{ictx->image_lock}; *features = ictx->features; return 0; } @@ -973,7 +974,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { int r = ictx->state->refresh_if_required(); if (r < 0) return r; - RWLock::RLocker image_locker(ictx->image_lock); + std::shared_lock image_locker{ictx->image_lock}; return ictx->get_parent_overlap(ictx->snap_id, overlap); } @@ -984,7 +985,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { return r; } - RWLock::RLocker l2(ictx->image_lock); + std::shared_lock l2{ictx->image_lock}; return ictx->get_flags(ictx->snap_id, flags); } @@ -1009,7 +1010,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { ldout(cct, 20) << __func__ << ": ictx=" << ictx << dendl; *is_owner = false; - RWLock::RLocker owner_locker(ictx->owner_lock); + std::shared_lock owner_locker{ictx->owner_lock}; if (ictx->exclusive_lock == nullptr) { return 0; } @@ -1039,7 +1040,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { C_SaferCond lock_ctx; { - RWLock::WLocker l(ictx->owner_lock); + std::unique_lock l{ictx->owner_lock}; if (ictx->exclusive_lock == nullptr) { lderr(cct) << "exclusive-lock feature is not enabled" << dendl; @@ -1065,7 +1066,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { return r; } - RWLock::RLocker l(ictx->owner_lock); + std::shared_lock l{ictx->owner_lock}; if (ictx->exclusive_lock == nullptr) { return -EINVAL; } else if (!ictx->exclusive_lock->is_lock_owner()) { @@ -1083,7 +1084,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { C_SaferCond lock_ctx; { - RWLock::WLocker l(ictx->owner_lock); + std::unique_lock l{ictx->owner_lock}; if (ictx->exclusive_lock == nullptr || !ictx->exclusive_lock->is_lock_owner()) { @@ -1150,7 +1151,7 @@ int 
validate_pool(IoCtx &io_ctx, CephContext *cct) { managed_lock::Locker locker; C_SaferCond get_owner_ctx; { - RWLock::RLocker l(ictx->owner_lock); + std::shared_lock l{ictx->owner_lock}; if (ictx->exclusive_lock == nullptr) { lderr(cct) << "exclusive-lock feature is not enabled" << dendl; @@ -1174,7 +1175,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { C_SaferCond break_ctx; { - RWLock::RLocker l(ictx->owner_lock); + std::shared_lock l{ictx->owner_lock}; if (ictx->exclusive_lock == nullptr) { lderr(cct) << "exclusive-lock feature is not enabled" << dendl; @@ -1201,7 +1202,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { if (r < 0) return r; - RWLock::RLocker l(ictx->image_lock); + std::shared_lock l{ictx->image_lock}; for (map::iterator it = ictx->snap_info.begin(); it != ictx->snap_info.end(); ++it) { snap_info_t info; @@ -1223,7 +1224,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { if (r < 0) return r; - RWLock::RLocker l(ictx->image_lock); + std::shared_lock l{ictx->image_lock}; *exists = ictx->get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP; return 0; } @@ -1325,10 +1326,10 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { << (src->snap_name.length() ? "@" + src->snap_name : "") << " -> " << destname << " opts = " << opts << dendl; - src->image_lock.get_read(); + src->image_lock.lock_shared(); uint64_t features = src->features; uint64_t src_size = src->get_image_size(src->snap_id); - src->image_lock.put_read(); + src->image_lock.unlock_shared(); uint64_t format = src->old_format ? 
1 : 2; if (opts.get(RBD_IMAGE_OPTION_FORMAT, &format) != 0) { opts.set(RBD_IMAGE_OPTION_FORMAT, format); @@ -1470,13 +1471,13 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { int copy(ImageCtx *src, ImageCtx *dest, ProgressContext &prog_ctx, size_t sparse_size) { - src->image_lock.get_read(); + src->image_lock.lock_shared(); uint64_t src_size = src->get_image_size(src->snap_id); - src->image_lock.put_read(); + src->image_lock.unlock_shared(); - dest->image_lock.get_read(); + dest->image_lock.lock_shared(); uint64_t dest_size = dest->get_image_size(dest->snap_id); - dest->image_lock.put_read(); + dest->image_lock.unlock_shared(); CephContext *cct = src->cct; if (dest_size < src_size) { @@ -1514,7 +1515,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { trace.init("copy", &src->trace_endpoint); } - RWLock::RLocker owner_lock(src->owner_lock); + std::shared_lock owner_lock{src->owner_lock}; SimpleThrottle throttle(src->config.get_val("rbd_concurrent_management_ops"), false); uint64_t period = src->get_stripe_period(); unsigned fadvise_flags = LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL | @@ -1526,7 +1527,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { } { - RWLock::RLocker image_locker(src->image_lock); + std::shared_lock image_locker{src->image_lock}; if (src->object_map != nullptr) { bool skip = true; // each period is related to src->stripe_count objects, check them all @@ -1576,7 +1577,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { if (r < 0) return r; - RWLock::RLocker locker(ictx->image_lock); + std::shared_lock locker{ictx->image_lock}; if (exclusive) *exclusive = ictx->exclusive_locked; if (tag) @@ -1614,7 +1615,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { * duplicate that code. */ { - RWLock::RLocker locker(ictx->image_lock); + std::shared_lock locker{ictx->image_lock}; r = rados::cls::lock::lock(&ictx->md_ctx, ictx->header_oid, RBD_LOCK_NAME, exclusive ? 
LOCK_EXCLUSIVE : LOCK_SHARED, cookie, tag, "", utime_t(), 0); @@ -1637,7 +1638,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { return r; { - RWLock::RLocker locker(ictx->image_lock); + std::shared_lock locker{ictx->image_lock}; r = rados::cls::lock::unlock(&ictx->md_ctx, ictx->header_oid, RBD_LOCK_NAME, cookie); if (r < 0) { @@ -1735,9 +1736,9 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { return r; uint64_t mylen = len; - ictx->image_lock.get_read(); + ictx->image_lock.lock_shared(); r = clip_io(ictx, off, &mylen); - ictx->image_lock.put_read(); + ictx->image_lock.unlock_shared(); if (r < 0) return r; @@ -1750,7 +1751,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { trace.init("read_iterate", &ictx->trace_endpoint); } - RWLock::RLocker owner_locker(ictx->owner_lock); + std::shared_lock owner_locker{ictx->owner_lock}; start_time = coarse_mono_clock::now(); while (left > 0) { uint64_t period_off = off - (off % period); @@ -1789,7 +1790,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { // validate extent against image size; clip to image size if necessary int clip_io(ImageCtx *ictx, uint64_t off, uint64_t *len) { - ceph_assert(ictx->image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(ictx->image_lock)); uint64_t image_size = ictx->get_image_size(ictx->snap_id); bool snap_exists = ictx->snap_exists; @@ -1823,7 +1824,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { C_SaferCond ctx; { - RWLock::RLocker owner_locker(ictx->owner_lock); + std::shared_lock owner_locker{ictx->owner_lock}; ictx->io_object_dispatcher->invalidate_cache(&ctx); } r = ctx.wait(); diff --git a/src/librbd/io/AsyncOperation.cc b/src/librbd/io/AsyncOperation.cc index c5a3bc932e0a..2a00cd6699ac 100644 --- a/src/librbd/io/AsyncOperation.cc +++ b/src/librbd/io/AsyncOperation.cc @@ -25,7 +25,7 @@ struct C_CompleteFlushes : public Context { : image_ctx(image_ctx), flush_contexts(std::move(flush_contexts)) { } void finish(int r) override { - 
RWLock::RLocker owner_locker(image_ctx->owner_lock); + std::shared_lock owner_locker{image_ctx->owner_lock}; while (!flush_contexts.empty()) { Context *flush_ctx = flush_contexts.front(); flush_contexts.pop_front(); @@ -43,7 +43,7 @@ void AsyncOperation::start_op(ImageCtx &image_ctx) { m_image_ctx = &image_ctx; ldout(m_image_ctx->cct, 20) << this << " " << __func__ << dendl; - Mutex::Locker l(m_image_ctx->async_ops_lock); + std::lock_guard l{m_image_ctx->async_ops_lock}; m_image_ctx->async_ops.push_front(&m_xlist_item); } @@ -51,7 +51,7 @@ void AsyncOperation::finish_op() { ldout(m_image_ctx->cct, 20) << this << " " << __func__ << dendl; { - Mutex::Locker l(m_image_ctx->async_ops_lock); + std::lock_guard l{m_image_ctx->async_ops_lock}; xlist::iterator iter(&m_xlist_item); ++iter; ceph_assert(m_xlist_item.remove_myself()); @@ -76,7 +76,7 @@ void AsyncOperation::finish_op() { void AsyncOperation::flush(Context* on_finish) { { - Mutex::Locker locker(m_image_ctx->async_ops_lock); + std::lock_guard locker{m_image_ctx->async_ops_lock}; xlist::iterator iter(&m_xlist_item); ++iter; diff --git a/src/librbd/io/CopyupRequest.cc b/src/librbd/io/CopyupRequest.cc index 0f410966ad47..e9f5d2487ff8 100644 --- a/src/librbd/io/CopyupRequest.cc +++ b/src/librbd/io/CopyupRequest.cc @@ -3,9 +3,9 @@ #include "librbd/io/CopyupRequest.h" #include "common/ceph_context.h" +#include "common/ceph_mutex.h" #include "common/dout.h" #include "common/errno.h" -#include "common/Mutex.h" #include "common/WorkQueue.h" #include "librbd/AsyncObjectThrottle.h" #include "librbd/ExclusiveLock.h" @@ -52,13 +52,13 @@ public: int send() override { auto& image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); if (image_ctx.exclusive_lock == nullptr) { return 1; } ceph_assert(image_ctx.exclusive_lock->is_lock_owner()); - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; if 
(image_ctx.object_map == nullptr) { return 1; } @@ -73,7 +73,7 @@ public: int update_head() { auto& image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock)); bool sent = image_ctx.object_map->template aio_update( CEPH_NOSNAP, m_object_no, m_head_object_map_state, {}, m_trace, false, @@ -83,7 +83,7 @@ public: int update_snapshot(uint64_t snap_id) { auto& image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock)); uint8_t state = OBJECT_EXISTS; if (image_ctx.test_features(RBD_FEATURE_FAST_DIFF, image_ctx.image_lock) && @@ -115,8 +115,7 @@ CopyupRequest::CopyupRequest(I *ictx, uint64_t objectno, Extents &&image_extents, const ZTracer::Trace &parent_trace) : m_image_ctx(ictx), m_object_no(objectno), m_image_extents(image_extents), - m_trace(util::create_trace(*m_image_ctx, "copy-up", parent_trace)), - m_lock("CopyupRequest", false, false) + m_trace(util::create_trace(*m_image_ctx, "copy-up", parent_trace)) { m_async_op.start_op(*util::get_image_ctx(m_image_ctx)); } @@ -129,7 +128,7 @@ CopyupRequest::~CopyupRequest() { template void CopyupRequest::append_request(AbstractObjectWriteRequest *req) { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; auto cct = m_image_ctx->cct; ldout(cct, 20) << "object_request=" << req << ", " @@ -149,7 +148,7 @@ void CopyupRequest::send() { template void CopyupRequest::read_from_parent() { auto cct = m_image_ctx->cct; - RWLock::RLocker image_locker(m_image_ctx->image_lock); + std::shared_lock image_locker{m_image_ctx->image_lock}; if (m_image_ctx->parent == nullptr) { ldout(cct, 5) << "parent detached" << dendl; @@ -188,15 +187,15 @@ void CopyupRequest::handle_read_from_parent(int r) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "r=" << r << dendl; - m_image_ctx->image_lock.get_read(); - m_lock.Lock(); + m_image_ctx->image_lock.lock_shared(); + m_lock.lock(); 
m_copyup_is_zero = m_copyup_data.is_zero(); m_copyup_required = is_copyup_required(); disable_append_requests(); if (r < 0 && r != -ENOENT) { - m_lock.Unlock(); - m_image_ctx->image_lock.put_read(); + m_lock.unlock(); + m_image_ctx->image_lock.unlock_shared(); lderr(cct) << "error reading from parent: " << cpp_strerror(r) << dendl; finish(r); @@ -204,8 +203,8 @@ void CopyupRequest::handle_read_from_parent(int r) { } if (!m_copyup_required) { - m_lock.Unlock(); - m_image_ctx->image_lock.put_read(); + m_lock.unlock(); + m_image_ctx->image_lock.unlock_shared(); ldout(cct, 20) << "no-op, skipping" << dendl; finish(0); @@ -219,8 +218,8 @@ void CopyupRequest::handle_read_from_parent(int r) { m_image_ctx->snaps.rend()); } - m_lock.Unlock(); - m_image_ctx->image_lock.put_read(); + m_lock.unlock(); + m_image_ctx->image_lock.unlock_shared(); update_object_maps(); } @@ -228,12 +227,12 @@ void CopyupRequest::handle_read_from_parent(int r) { template void CopyupRequest::deep_copy() { auto cct = m_image_ctx->cct; - ceph_assert(m_image_ctx->image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock)); ceph_assert(m_image_ctx->parent != nullptr); - m_lock.Lock(); + m_lock.lock(); m_flatten = is_copyup_required() ? 
true : m_image_ctx->migration_info.flatten; - m_lock.Unlock(); + m_lock.unlock(); ldout(cct, 20) << "flatten=" << m_flatten << dendl; @@ -251,12 +250,12 @@ void CopyupRequest::handle_deep_copy(int r) { auto cct = m_image_ctx->cct; ldout(cct, 20) << "r=" << r << dendl; - m_image_ctx->image_lock.get_read(); - m_lock.Lock(); + m_image_ctx->image_lock.lock_shared(); + m_lock.lock(); m_copyup_required = is_copyup_required(); if (r == -ENOENT && !m_flatten && m_copyup_required) { - m_lock.Unlock(); - m_image_ctx->image_lock.put_read(); + m_lock.unlock(); + m_image_ctx->image_lock.unlock_shared(); ldout(cct, 10) << "restart deep-copy with flatten" << dendl; send(); @@ -266,8 +265,8 @@ void CopyupRequest::handle_deep_copy(int r) { disable_append_requests(); if (r < 0 && r != -ENOENT) { - m_lock.Unlock(); - m_image_ctx->image_lock.put_read(); + m_lock.unlock(); + m_image_ctx->image_lock.unlock_shared(); lderr(cct) << "error encountered during deep-copy: " << cpp_strerror(r) << dendl; @@ -276,8 +275,8 @@ void CopyupRequest::handle_deep_copy(int r) { } if (!m_copyup_required && !is_update_object_map_required(r)) { - m_lock.Unlock(); - m_image_ctx->image_lock.put_read(); + m_lock.unlock(); + m_image_ctx->image_lock.unlock_shared(); if (r == -ENOENT) { r = 0; @@ -296,16 +295,16 @@ void CopyupRequest::handle_deep_copy(int r) { compute_deep_copy_snap_ids(); } - m_lock.Unlock(); - m_image_ctx->image_lock.put_read(); + m_lock.unlock(); + m_image_ctx->image_lock.unlock_shared(); update_object_maps(); } template void CopyupRequest::update_object_maps() { - RWLock::RLocker owner_locker(m_image_ctx->owner_lock); - RWLock::RLocker image_locker(m_image_ctx->image_lock); + std::shared_lock owner_locker{m_image_ctx->owner_lock}; + std::shared_lock image_locker{m_image_ctx->image_lock}; if (m_image_ctx->object_map == nullptr) { image_locker.unlock(); owner_locker.unlock(); @@ -370,13 +369,13 @@ void CopyupRequest::handle_update_object_maps(int r) { template void CopyupRequest::copyup() { 
auto cct = m_image_ctx->cct; - m_image_ctx->image_lock.get_read(); + m_image_ctx->image_lock.lock_shared(); auto snapc = m_image_ctx->snapc; - m_image_ctx->image_lock.put_read(); + m_image_ctx->image_lock.unlock_shared(); - m_lock.Lock(); + m_lock.lock(); if (!m_copyup_required) { - m_lock.Unlock(); + m_lock.unlock(); ldout(cct, 20) << "skipping copyup" << dendl; finish(0); @@ -426,7 +425,7 @@ void CopyupRequest::copyup() { ++m_pending_copyups; } } - m_lock.Unlock(); + m_lock.unlock(); // issue librados ops at the end to simplify test cases std::string oid(data_object_name(m_image_ctx, m_object_no)); @@ -471,7 +470,7 @@ void CopyupRequest::handle_copyup(int r) { auto cct = m_image_ctx->cct; unsigned pending_copyups; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_pending_copyups > 0); pending_copyups = --m_pending_copyups; } @@ -526,13 +525,13 @@ void CopyupRequest::complete_requests(bool override_restart_retval, int r) { template void CopyupRequest::disable_append_requests() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); m_append_request_permitted = false; } template void CopyupRequest::remove_from_list() { - Mutex::Locker copyup_list_locker(m_image_ctx->copyup_list_lock); + std::lock_guard copyup_list_locker{m_image_ctx->copyup_list_lock}; auto it = m_image_ctx->copyup_list.find(m_object_no); if (it != m_image_ctx->copyup_list.end()) { @@ -542,7 +541,7 @@ void CopyupRequest::remove_from_list() { template bool CopyupRequest::is_copyup_required() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); bool copy_on_read = m_pending_requests.empty(); if (copy_on_read) { @@ -564,13 +563,13 @@ bool CopyupRequest::is_copyup_required() { template bool CopyupRequest::is_deep_copy() const { - ceph_assert(m_image_ctx->image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock)); return !m_image_ctx->migration_info.empty(); } template bool 
CopyupRequest::is_update_object_map_required(int r) { - ceph_assert(m_image_ctx->image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock)); if (r < 0) { return false; @@ -593,7 +592,7 @@ bool CopyupRequest::is_update_object_map_required(int r) { template void CopyupRequest::compute_deep_copy_snap_ids() { - ceph_assert(m_image_ctx->image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock)); // don't copy ids for the snaps updated by object deep copy or // that don't overlap diff --git a/src/librbd/io/CopyupRequest.h b/src/librbd/io/CopyupRequest.h index 25f19e14c314..01d679f6213b 100644 --- a/src/librbd/io/CopyupRequest.h +++ b/src/librbd/io/CopyupRequest.h @@ -7,7 +7,7 @@ #include "include/int_types.h" #include "include/rados/librados.hpp" #include "include/buffer.h" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "common/zipkin_trace.h" #include "librbd/io/AsyncOperation.h" #include "librbd/io/Types.h" @@ -96,7 +96,7 @@ private: std::vector m_snap_ids; bool m_first_snap_is_clean = false; - Mutex m_lock; + ceph::mutex m_lock = ceph::make_mutex("CopyupRequest", false); WriteRequests m_pending_requests; unsigned m_pending_copyups = 0; diff --git a/src/librbd/io/ImageRequest.cc b/src/librbd/io/ImageRequest.cc index 166c8702308b..d835368153f8 100644 --- a/src/librbd/io/ImageRequest.cc +++ b/src/librbd/io/ImageRequest.cc @@ -64,19 +64,19 @@ void readahead(I *ictx, const Extents& image_extents) { total_bytes += image_extent.second; } - ictx->image_lock.get_read(); + ictx->image_lock.lock_shared(); auto total_bytes_read = ictx->total_bytes_read.fetch_add(total_bytes); bool abort = ( ictx->readahead_disable_after_bytes != 0 && total_bytes_read > ictx->readahead_disable_after_bytes); if (abort) { - ictx->image_lock.put_read(); + ictx->image_lock.unlock_shared(); return; } uint64_t image_size = ictx->get_image_size(ictx->snap_id); auto snap_id = ictx->snap_id; - ictx->image_lock.put_read(); + 
ictx->image_lock.unlock_shared(); auto readahead_extent = ictx->readahead.update(image_extents, image_size); uint64_t readahead_offset = readahead_extent.first; @@ -247,7 +247,7 @@ void ImageRequest::send() { template int ImageRequest::clip_request() { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; for (auto &image_extent : m_image_extents) { auto clip_len = image_extent.second; int r = clip_io(get_image_ctx(&m_image_ctx), image_extent.first, &clip_len); @@ -286,7 +286,7 @@ void ImageRequest::update_timestamp() { utime_t ts = ceph_clock_now(); { - RWLock::RLocker timestamp_locker(m_image_ctx.timestamp_lock); + std::shared_lock timestamp_locker{m_image_ctx.timestamp_lock}; if(!should_update_timestamp(ts, std::invoke(get_timestamp_fn, m_image_ctx), update_interval)) { return; @@ -294,7 +294,7 @@ void ImageRequest::update_timestamp() { } { - RWLock::WLocker timestamp_locker(m_image_ctx.timestamp_lock); + std::unique_lock timestamp_locker{m_image_ctx.timestamp_lock}; bool update = should_update_timestamp( ts, std::invoke(get_timestamp_fn, m_image_ctx), update_interval); if (!update) { @@ -355,7 +355,7 @@ void ImageReadRequest::send_request() { { // prevent image size from changing between computing clip and recording // pending async operation - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; snap_id = image_ctx.snap_id; } @@ -419,7 +419,7 @@ void AbstractImageWriteRequest::send_request() { { // prevent image size from changing between computing clip and recording // pending async operation - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; if (image_ctx.snap_id != CEPH_NOSNAP || image_ctx.read_only) { aio_comp->fail(-EROFS); return; @@ -666,7 +666,7 @@ void ImageFlushRequest::send_request() { bool journaling = false; { - RWLock::RLocker image_locker(image_ctx.image_lock); + 
std::shared_lock image_locker{image_ctx.image_lock}; journaling = (m_flush_source == FLUSH_SOURCE_USER && image_ctx.journal != nullptr && image_ctx.journal->is_journal_appending()); diff --git a/src/librbd/io/ImageRequestWQ.cc b/src/librbd/io/ImageRequestWQ.cc index fb15ca339ea6..5bb0f1664f9d 100644 --- a/src/librbd/io/ImageRequestWQ.cc +++ b/src/librbd/io/ImageRequestWQ.cc @@ -93,12 +93,13 @@ ImageRequestWQ::ImageRequestWQ(I *image_ctx, const string &name, time_t ti, ThreadPool *tp) : ThreadPool::PointerWQ >(name, ti, 0, tp), m_image_ctx(*image_ctx), - m_lock(util::unique_lock_name("ImageRequestWQ::m_lock", this)) { + m_lock(ceph::make_shared_mutex( + util::unique_lock_name("ImageRequestWQ::m_lock", this))) { CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << "ictx=" << image_ctx << dendl; SafeTimer *timer; - Mutex *timer_lock; + ceph::mutex *timer_lock; ImageCtx::get_timer_instance(cct, &timer, &timer_lock); for (auto flag : throttle_flags) { @@ -137,9 +138,9 @@ ssize_t ImageRequestWQ::write(uint64_t off, uint64_t len, ldout(cct, 20) << "ictx=" << &m_image_ctx << ", off=" << off << ", " << "len = " << len << dendl; - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); int r = clip_io(util::get_image_ctx(&m_image_ctx), off, &len); - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); if (r < 0) { lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl; return r; @@ -163,9 +164,9 @@ ssize_t ImageRequestWQ::discard(uint64_t off, uint64_t len, ldout(cct, 20) << "ictx=" << &m_image_ctx << ", off=" << off << ", " << "len = " << len << dendl; - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); int r = clip_io(util::get_image_ctx(&m_image_ctx), off, &len); - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); if (r < 0) { lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl; return r; @@ -189,9 +190,9 @@ ssize_t ImageRequestWQ::writesame(uint64_t off, 
uint64_t len, ldout(cct, 20) << "ictx=" << &m_image_ctx << ", off=" << off << ", " << "len = " << len << ", data_len " << bl.length() << dendl; - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); int r = clip_io(util::get_image_ctx(&m_image_ctx), off, &len); - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); if (r < 0) { lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl; return r; @@ -218,9 +219,9 @@ ssize_t ImageRequestWQ::compare_and_write(uint64_t off, uint64_t len, ldout(cct, 20) << "compare_and_write ictx=" << &m_image_ctx << ", off=" << off << ", " << "len = " << len << dendl; - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); int r = clip_io(util::get_image_ctx(&m_image_ctx), off, &len); - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); if (r < 0) { lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl; return r; @@ -283,7 +284,7 @@ void ImageRequestWQ::aio_read(AioCompletion *c, uint64_t off, uint64_t len, // if journaling is enabled -- we need to replay the journal because // it might contain an uncommitted write - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.non_blocking_aio || writes_blocked() || !writes_empty() || require_lock_on_read()) { queue(ImageDispatchSpec::create_read_request( @@ -323,7 +324,7 @@ void ImageRequestWQ::aio_write(AioCompletion *c, uint64_t off, uint64_t len, return; } - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.non_blocking_aio || writes_blocked()) { queue(ImageDispatchSpec::create_write_request( m_image_ctx, c, {{off, len}}, std::move(bl), op_flags, trace)); @@ -362,7 +363,7 @@ void ImageRequestWQ::aio_discard(AioCompletion *c, uint64_t off, return; } - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock 
owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.non_blocking_aio || writes_blocked()) { queue(ImageDispatchSpec::create_discard_request( m_image_ctx, c, off, len, discard_granularity_bytes, trace)); @@ -397,7 +398,7 @@ void ImageRequestWQ::aio_flush(AioCompletion *c, bool native_async) { return; } - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.non_blocking_aio || writes_blocked() || !writes_empty()) { queue(ImageDispatchSpec::create_flush_request( m_image_ctx, c, FLUSH_SOURCE_USER, trace)); @@ -435,7 +436,7 @@ void ImageRequestWQ::aio_writesame(AioCompletion *c, uint64_t off, return; } - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.non_blocking_aio || writes_blocked()) { queue(ImageDispatchSpec::create_write_same_request( m_image_ctx, c, off, len, std::move(bl), op_flags, trace)); @@ -476,7 +477,7 @@ void ImageRequestWQ::aio_compare_and_write(AioCompletion *c, return; } - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; if (m_image_ctx.non_blocking_aio || writes_blocked()) { queue(ImageDispatchSpec::create_compare_and_write_request( m_image_ctx, c, {{off, len}}, std::move(cmp_bl), std::move(bl), @@ -493,10 +494,10 @@ void ImageRequestWQ::aio_compare_and_write(AioCompletion *c, template void ImageRequestWQ::shut_down(Context *on_shutdown) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); { - RWLock::WLocker locker(m_lock); + std::unique_lock locker{m_lock}; ceph_assert(!m_shutdown); m_shutdown = true; @@ -522,11 +523,11 @@ int ImageRequestWQ::block_writes() { template void ImageRequestWQ::block_writes(Context *on_blocked) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); CephContext *cct = m_image_ctx.cct; { - 
RWLock::WLocker locker(m_lock); + std::unique_lock locker{m_lock}; ++m_write_blockers; ldout(cct, 5) << &m_image_ctx << ", " << "num=" << m_write_blockers << dendl; @@ -547,7 +548,7 @@ void ImageRequestWQ::unblock_writes() { bool wake_up = false; Contexts waiter_contexts; { - RWLock::WLocker locker(m_lock); + std::unique_lock locker{m_lock}; ceph_assert(m_write_blockers > 0); --m_write_blockers; @@ -569,11 +570,11 @@ void ImageRequestWQ::unblock_writes() { template void ImageRequestWQ::wait_on_writes_unblocked(Context *on_unblocked) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); CephContext *cct = m_image_ctx.cct; { - RWLock::WLocker locker(m_lock); + std::unique_lock locker{m_lock}; ldout(cct, 20) << &m_image_ctx << ", " << "write_blockers=" << m_write_blockers << dendl; if (!m_unblocked_write_waiter_contexts.empty() || m_write_blockers > 0) { @@ -592,7 +593,7 @@ void ImageRequestWQ::set_require_lock(Direction direction, bool enabled) { bool wake_up = false; { - RWLock::WLocker locker(m_lock); + std::unique_lock locker{m_lock}; switch (direction) { case DIRECTION_READ: wake_up = (enabled != m_require_lock_on_read); @@ -720,7 +721,7 @@ void *ImageRequestWQ::_void_dequeue() { bool lock_required; bool refresh_required = m_image_ctx.state->is_refresh_required(); { - RWLock::RLocker locker(m_lock); + std::shared_lock locker{m_lock}; bool write_op = peek_item->is_write_op(); lock_required = is_lock_required(write_op); if (write_op) { @@ -742,7 +743,7 @@ void *ImageRequestWQ::_void_dequeue() { if (lock_required) { this->get_pool_lock().unlock(); - m_image_ctx.owner_lock.get_read(); + m_image_ctx.owner_lock.lock_shared(); if (m_image_ctx.exclusive_lock != nullptr) { ldout(cct, 5) << "exclusive lock required: delaying IO " << item << dendl; if (!m_image_ctx.get_exclusive_lock_policy()->may_auto_request_lock()) { @@ -761,7 +762,7 @@ void *ImageRequestWQ::_void_dequeue() { // raced with the exclusive lock 
being disabled lock_required = false; } - m_image_ctx.owner_lock.put_read(); + m_image_ctx.owner_lock.unlock_shared(); this->get_pool_lock().lock(); if (lock_required) { @@ -804,7 +805,7 @@ void ImageRequestWQ::process(ImageDispatchSpec *req) { template void ImageRequestWQ::finish_queued_io(ImageDispatchSpec *req) { - RWLock::RLocker locker(m_lock); + std::shared_lock locker{m_lock}; if (req->is_write_op()) { ceph_assert(m_queued_writes > 0); m_queued_writes--; @@ -818,7 +819,7 @@ template void ImageRequestWQ::finish_in_flight_write() { bool writes_blocked = false; { - RWLock::RLocker locker(m_lock); + std::shared_lock locker{m_lock}; ceph_assert(m_in_flight_writes > 0); if (--m_in_flight_writes == 0 && !m_write_blocker_contexts.empty()) { @@ -833,7 +834,7 @@ void ImageRequestWQ::finish_in_flight_write() { template int ImageRequestWQ::start_in_flight_io(AioCompletion *c) { - RWLock::RLocker locker(m_lock); + std::shared_lock locker{m_lock}; if (m_shutdown) { CephContext *cct = m_image_ctx.cct; @@ -851,7 +852,7 @@ template void ImageRequestWQ::finish_in_flight_io() { Context *on_shutdown; { - RWLock::RLocker locker(m_lock); + std::shared_lock locker{m_lock}; if (--m_in_flight_ios > 0 || !m_shutdown) { return; } @@ -877,14 +878,14 @@ void ImageRequestWQ::fail_in_flight_io( template bool ImageRequestWQ::is_lock_required(bool write_op) const { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); return ((write_op && m_require_lock_on_write) || (!write_op && m_require_lock_on_read)); } template void ImageRequestWQ::queue(ImageDispatchSpec *req) { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << "ictx=" << &m_image_ctx << ", " @@ -941,7 +942,7 @@ template void ImageRequestWQ::handle_blocked_writes(int r) { Contexts contexts; { - RWLock::WLocker locker(m_lock); + std::unique_lock locker{m_lock}; 
contexts.swap(m_write_blocker_contexts); } diff --git a/src/librbd/io/ImageRequestWQ.h b/src/librbd/io/ImageRequestWQ.h index a9134fc4a1bf..daa596330d82 100644 --- a/src/librbd/io/ImageRequestWQ.h +++ b/src/librbd/io/ImageRequestWQ.h @@ -5,7 +5,7 @@ #define CEPH_LIBRBD_IO_IMAGE_REQUEST_WQ_H #include "include/Context.h" -#include "common/RWLock.h" +#include "common/ceph_mutex.h" #include "common/Throttle.h" #include "common/WorkQueue.h" #include "librbd/io/Types.h" @@ -62,7 +62,7 @@ public: void shut_down(Context *on_shutdown); inline bool writes_blocked() const { - RWLock::RLocker locker(m_lock); + std::shared_lock locker{m_lock}; return (m_write_blockers > 0); } @@ -94,7 +94,7 @@ private: struct C_RefreshFinish; ImageCtxT &m_image_ctx; - mutable RWLock m_lock; + mutable ceph::shared_mutex m_lock; Contexts m_write_blocker_contexts; uint32_t m_write_blockers = 0; Contexts m_unblocked_write_waiter_contexts; @@ -116,11 +116,11 @@ private: bool is_lock_required(bool write_op) const; inline bool require_lock_on_read() const { - RWLock::RLocker locker(m_lock); + std::shared_lock locker{m_lock}; return m_require_lock_on_read; } inline bool writes_empty() const { - RWLock::RLocker locker(m_lock); + std::shared_lock locker{m_lock}; return (m_queued_writes == 0); } diff --git a/src/librbd/io/ObjectDispatcher.cc b/src/librbd/io/ObjectDispatcher.cc index 5e5d482ff59a..513f8adf43dc 100644 --- a/src/librbd/io/ObjectDispatcher.cc +++ b/src/librbd/io/ObjectDispatcher.cc @@ -34,11 +34,11 @@ struct ObjectDispatcher::C_LayerIterator : public Context { void complete(int r) override { while (true) { - object_dispatcher->m_lock.get_read(); + object_dispatcher->m_lock.lock_shared(); auto it = object_dispatcher->m_object_dispatches.upper_bound( object_dispatch_layer); if (it == object_dispatcher->m_object_dispatches.end()) { - object_dispatcher->m_lock.put_read(); + object_dispatcher->m_lock.unlock_shared(); Context::complete(r); return; } @@ -48,7 +48,7 @@ struct 
ObjectDispatcher::C_LayerIterator : public Context { // prevent recursive locking back into the dispatcher while handling IO object_dispatch_meta.async_op_tracker->start_op(); - object_dispatcher->m_lock.put_read(); + object_dispatcher->m_lock.unlock_shared(); // next loop should start after current layer object_dispatch_layer = object_dispatch->get_object_dispatch_layer(); @@ -175,8 +175,9 @@ struct ObjectDispatcher::SendVisitor : public boost::static_visitor { template ObjectDispatcher::ObjectDispatcher(I* image_ctx) : m_image_ctx(image_ctx), - m_lock(librbd::util::unique_lock_name("librbd::io::ObjectDispatcher::lock", - this)) { + m_lock(ceph::make_shared_mutex( + librbd::util::unique_lock_name("librbd::io::ObjectDispatcher::lock", + this))) { // configure the core object dispatch handler on startup auto object_dispatch = new ObjectDispatch(image_ctx); m_object_dispatches[object_dispatch->get_object_dispatch_layer()] = @@ -195,7 +196,7 @@ void ObjectDispatcher::shut_down(Context* on_finish) { std::map object_dispatches; { - RWLock::WLocker locker(m_lock); + std::unique_lock locker{m_lock}; std::swap(object_dispatches, m_object_dispatches); } @@ -212,7 +213,7 @@ void ObjectDispatcher::register_object_dispatch( auto type = object_dispatch->get_object_dispatch_layer(); ldout(cct, 5) << "object_dispatch_layer=" << type << dendl; - RWLock::WLocker locker(m_lock); + std::unique_lock locker{m_lock}; ceph_assert(type < OBJECT_DISPATCH_LAYER_LAST); auto result = m_object_dispatches.insert( @@ -229,7 +230,7 @@ void ObjectDispatcher::shut_down_object_dispatch( ObjectDispatchMeta object_dispatch_meta; { - RWLock::WLocker locker(m_lock); + std::unique_lock locker{m_lock}; auto it = m_object_dispatches.find(object_dispatch_layer); ceph_assert(it != m_object_dispatches.end()); @@ -310,13 +311,13 @@ void ObjectDispatcher::send(ObjectDispatchSpec* object_dispatch_spec) { // apply the IO request to all layers -- this method will be re-invoked // by the dispatch layer if 
continuing / restarting the IO while (true) { - m_lock.get_read(); + m_lock.lock_shared(); object_dispatch_layer = object_dispatch_spec->object_dispatch_layer; auto it = m_object_dispatches.upper_bound(object_dispatch_layer); if (it == m_object_dispatches.end()) { // the request is complete if handled by all layers object_dispatch_spec->dispatch_result = DISPATCH_RESULT_COMPLETE; - m_lock.put_read(); + m_lock.unlock_shared(); break; } @@ -326,7 +327,7 @@ void ObjectDispatcher::send(ObjectDispatchSpec* object_dispatch_spec) { // prevent recursive locking back into the dispatcher while handling IO object_dispatch_meta.async_op_tracker->start_op(); - m_lock.put_read(); + m_lock.unlock_shared(); // advance to next layer in case we skip or continue object_dispatch_spec->object_dispatch_layer = diff --git a/src/librbd/io/ObjectDispatcher.h b/src/librbd/io/ObjectDispatcher.h index 0370d2684f32..c949a97573fb 100644 --- a/src/librbd/io/ObjectDispatcher.h +++ b/src/librbd/io/ObjectDispatcher.h @@ -5,7 +5,7 @@ #define CEPH_LIBRBD_IO_OBJECT_DISPATCHER_H #include "include/int_types.h" -#include "common/RWLock.h" +#include "common/ceph_mutex.h" #include "librbd/io/Types.h" #include @@ -71,7 +71,7 @@ private: ImageCtxT* m_image_ctx; - RWLock m_lock; + ceph::shared_mutex m_lock; std::map m_object_dispatches; void send(ObjectDispatchSpec* object_dispatch_spec); diff --git a/src/librbd/io/ObjectRequest.cc b/src/librbd/io/ObjectRequest.cc index 1c3ac150885a..b8845f6b7f4e 100644 --- a/src/librbd/io/ObjectRequest.cc +++ b/src/librbd/io/ObjectRequest.cc @@ -5,8 +5,7 @@ #include "common/ceph_context.h" #include "common/dout.h" #include "common/errno.h" -#include "common/Mutex.h" -#include "common/RWLock.h" +#include "common/ceph_mutex.h" #include "common/WorkQueue.h" #include "include/Context.h" #include "include/err.h" @@ -40,7 +39,7 @@ namespace { template inline bool is_copy_on_read(I *ictx, librados::snap_t snap_id) { - RWLock::RLocker image_locker(ictx->image_lock); + 
std::shared_lock image_locker{ictx->image_lock}; return (ictx->clone_copy_on_read && !ictx->read_only && snap_id == CEPH_NOSNAP && (ictx->exclusive_lock == nullptr || @@ -123,7 +122,7 @@ void ObjectRequest::add_write_hint(I& image_ctx, template bool ObjectRequest::compute_parent_extents(Extents *parent_extents, bool read_request) { - ceph_assert(m_ictx->image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_ictx->image_lock)); m_has_parent = false; parent_extents->clear(); @@ -197,7 +196,7 @@ template void ObjectReadRequest::read_object() { I *image_ctx = this->m_ictx; { - RWLock::RLocker image_locker(image_ctx->image_lock); + std::shared_lock image_locker{image_ctx->image_lock}; if (image_ctx->object_map != nullptr && !image_ctx->object_map->object_may_exist(this->m_object_no)) { image_ctx->op_work_queue->queue(new FunctionContext([this](int r) { @@ -252,7 +251,7 @@ template void ObjectReadRequest::read_parent() { I *image_ctx = this->m_ictx; - RWLock::RLocker image_locker(image_ctx->image_lock); + std::shared_lock image_locker{image_ctx->image_lock}; // calculate reverse mapping onto the image Extents parent_extents; @@ -311,21 +310,21 @@ void ObjectReadRequest::copyup() { return; } - image_ctx->owner_lock.get_read(); - image_ctx->image_lock.get_read(); + image_ctx->owner_lock.lock_shared(); + image_ctx->image_lock.lock_shared(); Extents parent_extents; if (!this->compute_parent_extents(&parent_extents, true) || (image_ctx->exclusive_lock != nullptr && !image_ctx->exclusive_lock->is_lock_owner())) { - image_ctx->image_lock.put_read(); - image_ctx->owner_lock.put_read(); + image_ctx->image_lock.unlock_shared(); + image_ctx->owner_lock.unlock_shared(); this->finish(0); return; } ldout(image_ctx->cct, 20) << dendl; - image_ctx->copyup_list_lock.Lock(); + image_ctx->copyup_list_lock.lock(); auto it = image_ctx->copyup_list.find(this->m_object_no); if (it == image_ctx->copyup_list.end()) { // create and kick off a CopyupRequest @@ -333,15 +332,15 @@ void 
ObjectReadRequest::copyup() { image_ctx, this->m_object_no, std::move(parent_extents), this->m_trace); image_ctx->copyup_list[this->m_object_no] = new_req; - image_ctx->copyup_list_lock.Unlock(); - image_ctx->image_lock.put_read(); + image_ctx->copyup_list_lock.unlock(); + image_ctx->image_lock.unlock_shared(); new_req->send(); } else { - image_ctx->copyup_list_lock.Unlock(); - image_ctx->image_lock.put_read(); + image_ctx->copyup_list_lock.unlock(); + image_ctx->image_lock.unlock_shared(); } - image_ctx->owner_lock.put_read(); + image_ctx->owner_lock.unlock_shared(); this->finish(0); } @@ -365,17 +364,17 @@ AbstractObjectWriteRequest::AbstractObjectWriteRequest( compute_parent_info(); - ictx->image_lock.get_read(); + ictx->image_lock.lock_shared(); if (!ictx->migration_info.empty()) { m_guarding_migration_write = true; } - ictx->image_lock.put_read(); + ictx->image_lock.unlock_shared(); } template void AbstractObjectWriteRequest::compute_parent_info() { I *image_ctx = this->m_ictx; - RWLock::RLocker image_locker(image_ctx->image_lock); + std::shared_lock image_locker{image_ctx->image_lock}; this->compute_parent_extents(&m_parent_extents, false); @@ -389,7 +388,7 @@ template void AbstractObjectWriteRequest::add_write_hint( librados::ObjectWriteOperation *wr) { I *image_ctx = this->m_ictx; - RWLock::RLocker image_locker(image_ctx->image_lock); + std::shared_lock image_locker{image_ctx->image_lock}; if (image_ctx->object_map == nullptr || !this->m_object_may_exist) { ObjectRequest::add_write_hint(*image_ctx, wr); } @@ -402,7 +401,7 @@ void AbstractObjectWriteRequest::send() { << this->m_object_off << "~" << this->m_object_len << dendl; { - RWLock::RLocker image_lock(image_ctx->image_lock); + std::shared_lock image_lock{image_ctx->image_lock}; if (image_ctx->object_map == nullptr) { m_object_may_exist = true; } else { @@ -427,16 +426,16 @@ template void AbstractObjectWriteRequest::pre_write_object_map_update() { I *image_ctx = this->m_ictx; - 
image_ctx->image_lock.get_read(); + image_ctx->image_lock.lock_shared(); if (image_ctx->object_map == nullptr || !is_object_map_update_enabled()) { - image_ctx->image_lock.put_read(); + image_ctx->image_lock.unlock_shared(); write_object(); return; } if (!m_object_may_exist && m_copyup_enabled) { // optimization: copyup required - image_ctx->image_lock.put_read(); + image_ctx->image_lock.unlock_shared(); copyup(); return; } @@ -450,11 +449,11 @@ void AbstractObjectWriteRequest::pre_write_object_map_update() { &AbstractObjectWriteRequest::handle_pre_write_object_map_update>( CEPH_NOSNAP, this->m_object_no, new_state, {}, this->m_trace, false, this)) { - image_ctx->image_lock.put_read(); + image_ctx->image_lock.unlock_shared(); return; } - image_ctx->image_lock.put_read(); + image_ctx->image_lock.unlock_shared(); write_object(); } @@ -515,9 +514,9 @@ void AbstractObjectWriteRequest::handle_write_object(int r) { return; } } else if (r == -ERANGE && m_guarding_migration_write) { - image_ctx->image_lock.get_read(); + image_ctx->image_lock.lock_shared(); m_guarding_migration_write = !image_ctx->migration_info.empty(); - image_ctx->image_lock.put_read(); + image_ctx->image_lock.unlock_shared(); if (m_guarding_migration_write) { copyup(); @@ -549,7 +548,7 @@ void AbstractObjectWriteRequest::copyup() { ceph_assert(!m_copyup_in_progress); m_copyup_in_progress = true; - image_ctx->copyup_list_lock.Lock(); + image_ctx->copyup_list_lock.lock(); auto it = image_ctx->copyup_list.find(this->m_object_no); if (it == image_ctx->copyup_list.end()) { auto new_req = CopyupRequest::create( @@ -561,11 +560,11 @@ void AbstractObjectWriteRequest::copyup() { new_req->append_request(this); image_ctx->copyup_list[this->m_object_no] = new_req; - image_ctx->copyup_list_lock.Unlock(); + image_ctx->copyup_list_lock.unlock(); new_req->send(); } else { it->second->append_request(this); - image_ctx->copyup_list_lock.Unlock(); + image_ctx->copyup_list_lock.unlock(); } } @@ -596,10 +595,10 @@ template 
void AbstractObjectWriteRequest::post_write_object_map_update() { I *image_ctx = this->m_ictx; - image_ctx->image_lock.get_read(); + image_ctx->image_lock.lock_shared(); if (image_ctx->object_map == nullptr || !is_object_map_update_enabled() || !is_non_existent_post_write_object_map_state()) { - image_ctx->image_lock.put_read(); + image_ctx->image_lock.unlock_shared(); this->finish(0); return; } @@ -613,11 +612,11 @@ void AbstractObjectWriteRequest::post_write_object_map_update() { &AbstractObjectWriteRequest::handle_post_write_object_map_update>( CEPH_NOSNAP, this->m_object_no, OBJECT_NONEXISTENT, OBJECT_PENDING, this->m_trace, false, this)) { - image_ctx->image_lock.put_read(); + image_ctx->image_lock.unlock_shared(); return; } - image_ctx->image_lock.put_read(); + image_ctx->image_lock.unlock_shared(); this->finish(0); } diff --git a/src/librbd/io/SimpleSchedulerObjectDispatch.cc b/src/librbd/io/SimpleSchedulerObjectDispatch.cc index 6e1632c049cd..1abb8e3109ec 100644 --- a/src/librbd/io/SimpleSchedulerObjectDispatch.cc +++ b/src/librbd/io/SimpleSchedulerObjectDispatch.cc @@ -140,7 +140,7 @@ void SimpleSchedulerObjectDispatch::ObjectRequests::try_merge_delayed_request template void SimpleSchedulerObjectDispatch::ObjectRequests::dispatch_delayed_requests( - I *image_ctx, LatencyStats *latency_stats, Mutex *latency_stats_lock) { + I *image_ctx, LatencyStats *latency_stats, ceph::mutex *latency_stats_lock) { for (auto &it : m_delayed_requests) { auto offset = it.first; auto &merged_requests = it.second; @@ -149,7 +149,7 @@ void SimpleSchedulerObjectDispatch::ObjectRequests::dispatch_delayed_requests [requests=std::move(merged_requests.requests), latency_stats, latency_stats_lock, start_time=ceph_clock_now()](int r) { if (latency_stats) { - Mutex::Locker locker(*latency_stats_lock); + std::lock_guard locker{*latency_stats_lock}; auto latency = ceph_clock_now() - start_time; latency_stats->add(latency.to_nsec()); } @@ -167,15 +167,15 @@ void 
SimpleSchedulerObjectDispatch::ObjectRequests::dispatch_delayed_requests req->send(); } - m_dispatch_time = utime_t(); + m_dispatch_time = {}; } template SimpleSchedulerObjectDispatch::SimpleSchedulerObjectDispatch( I* image_ctx) : m_image_ctx(image_ctx), - m_lock(librbd::util::unique_lock_name( - "librbd::io::SimpleSchedulerObjectDispatch::lock", this)), + m_lock(ceph::make_mutex(librbd::util::unique_lock_name( + "librbd::io::SimpleSchedulerObjectDispatch::lock", this))), m_max_delay(image_ctx->config.template get_val( "rbd_io_scheduler_simple_max_delay")) { CephContext *cct = m_image_ctx->cct; @@ -220,7 +220,7 @@ bool SimpleSchedulerObjectDispatch::read( ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << object_off << "~" << object_len << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (intersects(object_no, object_off, object_len)) { dispatch_delayed_requests(object_no); } @@ -239,7 +239,7 @@ bool SimpleSchedulerObjectDispatch::discard( ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << object_off << "~" << object_len << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; dispatch_delayed_requests(object_no); register_in_flight_request(object_no, {}, on_finish); @@ -257,7 +257,7 @@ bool SimpleSchedulerObjectDispatch::write( ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << object_off << "~" << data.length() << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (try_delay_write(object_no, object_off, std::move(data), snapc, op_flags, *object_dispatch_flags, on_dispatched)) { *dispatch_result = DISPATCH_RESULT_COMPLETE; @@ -282,7 +282,7 @@ bool SimpleSchedulerObjectDispatch::write_same( ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << object_off << "~" << object_len << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; dispatch_delayed_requests(object_no); register_in_flight_request(object_no, 
{}, on_finish); @@ -301,7 +301,7 @@ bool SimpleSchedulerObjectDispatch::compare_and_write( ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " " << object_off << "~" << cmp_data.length() << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; dispatch_delayed_requests(object_no); register_in_flight_request(object_no, {}, on_finish); @@ -316,7 +316,7 @@ bool SimpleSchedulerObjectDispatch::flush( auto cct = m_image_ctx->cct; ldout(cct, 20) << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; dispatch_all_delayed_requests(); return false; @@ -325,7 +325,7 @@ bool SimpleSchedulerObjectDispatch::flush( template bool SimpleSchedulerObjectDispatch::intersects( uint64_t object_no, uint64_t object_off, uint64_t len) const { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); auto cct = m_image_ctx->cct; auto it = m_requests.find(object_no); @@ -342,7 +342,7 @@ bool SimpleSchedulerObjectDispatch::try_delay_write( uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data, const ::SnapContext &snapc, int op_flags, int object_dispatch_flags, Context* on_dispatched) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); auto cct = m_image_ctx->cct; if (m_latency_stats && !m_latency_stats->is_ready()) { @@ -365,11 +365,11 @@ bool SimpleSchedulerObjectDispatch::try_delay_write( // schedule dispatch on the first request added if (delayed && !object_requests->is_scheduled_dispatch()) { - auto dispatch_time = ceph_clock_now(); + auto dispatch_time = ceph::real_clock::now(); if (m_latency_stats) { - dispatch_time += utime_t(0, m_latency_stats->avg() / 2); + dispatch_time += std::chrono::nanoseconds(m_latency_stats->avg() / 2); } else { - dispatch_time += utime_t(0, m_max_delay * 1000000); + dispatch_time += std::chrono::milliseconds(m_max_delay); } object_requests->set_scheduled_dispatch(dispatch_time); m_dispatch_queue.push_back(object_requests); @@ -383,7 +383,7 @@ 
bool SimpleSchedulerObjectDispatch::try_delay_write( template void SimpleSchedulerObjectDispatch::dispatch_all_delayed_requests() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); auto cct = m_image_ctx->cct; ldout(cct, 20) << dendl; @@ -408,7 +408,7 @@ void SimpleSchedulerObjectDispatch::register_in_flight_request( [this, object_no, dispatch_seq, start_time, ctx=*on_finish](int r) { ctx->complete(r); - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; if (m_latency_stats && start_time != utime_t()) { auto latency = ceph_clock_now() - start_time; m_latency_stats->add(latency.to_nsec()); @@ -427,7 +427,7 @@ void SimpleSchedulerObjectDispatch::register_in_flight_request( template void SimpleSchedulerObjectDispatch::dispatch_delayed_requests( uint64_t object_no) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); auto cct = m_image_ctx->cct; auto it = m_requests.find(object_no); @@ -443,7 +443,7 @@ void SimpleSchedulerObjectDispatch::dispatch_delayed_requests( template void SimpleSchedulerObjectDispatch::dispatch_delayed_requests( ObjectRequestsRef object_requests) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); auto cct = m_image_ctx->cct; ldout(cct, 20) << "object_no=" << object_requests->get_object_no() << ", " @@ -467,10 +467,10 @@ void SimpleSchedulerObjectDispatch::dispatch_delayed_requests( template void SimpleSchedulerObjectDispatch::schedule_dispatch_delayed_requests() { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); auto cct = m_image_ctx->cct; - Mutex::Locker timer_locker(*m_timer_lock); + std::lock_guard timer_locker{*m_timer_lock}; if (m_timer_task != nullptr) { ldout(cct, 20) << "canceling task " << m_timer_task << dendl; @@ -500,7 +500,7 @@ void SimpleSchedulerObjectDispatch::schedule_dispatch_delayed_requests() { m_timer_task = new FunctionContext( [this, object_no=object_requests->get_object_no()](int r) { - 
ceph_assert(m_timer_lock->is_locked()); + ceph_assert(ceph_mutex_is_locked(*m_timer_lock)); auto cct = m_image_ctx->cct; ldout(cct, 20) << "running timer task " << m_timer_task << dendl; @@ -508,7 +508,7 @@ void SimpleSchedulerObjectDispatch::schedule_dispatch_delayed_requests() { m_image_ctx->op_work_queue->queue( new FunctionContext( [this, object_no](int r) { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; dispatch_delayed_requests(object_no); }), 0); }); diff --git a/src/librbd/io/SimpleSchedulerObjectDispatch.h b/src/librbd/io/SimpleSchedulerObjectDispatch.h index 90f7747173e4..e5a88371d7f3 100644 --- a/src/librbd/io/SimpleSchedulerObjectDispatch.h +++ b/src/librbd/io/SimpleSchedulerObjectDispatch.h @@ -4,7 +4,7 @@ #ifndef CEPH_LIBRBD_IO_SIMPLE_SCHEDULER_OBJECT_DISPATCH_H #define CEPH_LIBRBD_IO_SIMPLE_SCHEDULER_OBJECT_DISPATCH_H -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "common/snap_types.h" #include "include/interval_set.h" #include "include/utime.h" @@ -111,6 +111,8 @@ private: class ObjectRequests { public: + using clock_t = ceph::real_clock; + ObjectRequests(uint64_t object_no) : m_object_no(object_no) { } @@ -126,16 +128,16 @@ private: return m_dispatch_seq; } - utime_t get_dispatch_time() const { + clock_t::time_point get_dispatch_time() const { return m_dispatch_time; } - void set_scheduled_dispatch(const utime_t &dispatch_time) { + void set_scheduled_dispatch(const clock_t::time_point &dispatch_time) { m_dispatch_time = dispatch_time; } bool is_scheduled_dispatch() const { - return m_dispatch_time != utime_t(); + return !clock_t::is_zero(m_dispatch_time); } size_t delayed_requests_size() const { @@ -152,12 +154,12 @@ private: void dispatch_delayed_requests(ImageCtxT *image_ctx, LatencyStats *latency_stats, - Mutex *latency_stats_lock); + ceph::mutex *latency_stats_lock); private: uint64_t m_object_no; uint64_t m_dispatch_seq = 0; - utime_t m_dispatch_time; + clock_t::time_point m_dispatch_time; SnapContext 
m_snapc = {0, {}}; int m_op_flags = 0; int m_object_dispatch_flags = 0; @@ -174,9 +176,9 @@ private: ImageCtxT *m_image_ctx; - Mutex m_lock; + ceph::mutex m_lock; SafeTimer *m_timer; - Mutex *m_timer_lock; + ceph::mutex *m_timer_lock; uint64_t m_max_delay; uint64_t m_dispatch_seq = 0; diff --git a/src/librbd/journal/CreateRequest.h b/src/librbd/journal/CreateRequest.h index c2b8cd9cf796..8a4e40f88c44 100644 --- a/src/librbd/journal/CreateRequest.h +++ b/src/librbd/journal/CreateRequest.h @@ -8,7 +8,7 @@ #include "include/buffer.h" #include "include/rados/librados.hpp" #include "include/rbd/librbd.hpp" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "librbd/ImageCtx.h" #include "journal/Journaler.h" #include "librbd/journal/Types.h" @@ -73,7 +73,7 @@ private: bufferlist m_bl; Journaler *m_journaler; SafeTimer *m_timer; - Mutex *m_timer_lock; + ceph::mutex *m_timer_lock; int m_r_saved; int64_t m_pool_id = -1; diff --git a/src/librbd/journal/DemoteRequest.cc b/src/librbd/journal/DemoteRequest.cc index 7656caac8090..649388266b08 100644 --- a/src/librbd/journal/DemoteRequest.cc +++ b/src/librbd/journal/DemoteRequest.cc @@ -26,7 +26,7 @@ using librbd::util::create_context_callback; template DemoteRequest::DemoteRequest(I &image_ctx, Context *on_finish) : m_image_ctx(image_ctx), m_on_finish(on_finish), - m_lock("DemoteRequest::m_lock") { + m_lock(ceph::make_mutex("DemoteRequest::m_lock")) { } template diff --git a/src/librbd/journal/DemoteRequest.h b/src/librbd/journal/DemoteRequest.h index 5fea7f47b301..6aba6cc8f473 100644 --- a/src/librbd/journal/DemoteRequest.h +++ b/src/librbd/journal/DemoteRequest.h @@ -4,7 +4,7 @@ #ifndef CEPH_LIBRBD_JOURNAL_DEMOTE_REQUEST_H #define CEPH_LIBRBD_JOURNAL_DEMOTE_REQUEST_H -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "cls/journal/cls_journal_types.h" #include "journal/Future.h" #include "librbd/journal/Types.h" @@ -69,7 +69,7 @@ private: Journaler *m_journaler = nullptr; int m_ret_val = 0; - 
Mutex m_lock; + ceph::mutex m_lock; ImageClientMeta m_client_meta; uint64_t m_tag_tid = 0; TagData m_tag_data; diff --git a/src/librbd/journal/OpenRequest.cc b/src/librbd/journal/OpenRequest.cc index a5178de804c4..e0cde38d0c35 100644 --- a/src/librbd/journal/OpenRequest.cc +++ b/src/librbd/journal/OpenRequest.cc @@ -25,7 +25,7 @@ using librbd::util::create_context_callback; using util::C_DecodeTags; template -OpenRequest::OpenRequest(I *image_ctx, Journaler *journaler, Mutex *lock, +OpenRequest::OpenRequest(I *image_ctx, Journaler *journaler, ceph::mutex *lock, journal::ImageClientMeta *client_meta, uint64_t *tag_tid, journal::TagData *tag_data, Context *on_finish) @@ -97,7 +97,7 @@ void OpenRequest::handle_init(int r) { m_tag_class = image_client_meta->tag_class; { - Mutex::Locker locker(*m_lock); + std::lock_guard locker{*m_lock}; *m_client_meta = *image_client_meta; } diff --git a/src/librbd/journal/OpenRequest.h b/src/librbd/journal/OpenRequest.h index f71d8c536768..0f10bccf1ade 100644 --- a/src/librbd/journal/OpenRequest.h +++ b/src/librbd/journal/OpenRequest.h @@ -4,11 +4,11 @@ #ifndef CEPH_LIBRBD_JOURNAL_OPEN_REQUEST_H #define CEPH_LIBRBD_JOURNAL_OPEN_REQUEST_H +#include "common/ceph_mutex.h" #include "include/int_types.h" #include "librbd/journal/TypeTraits.h" struct Context; -struct Mutex; namespace librbd { @@ -25,14 +25,14 @@ public: typedef typename TypeTraits::Journaler Journaler; static OpenRequest* create(ImageCtxT *image_ctx, Journaler *journaler, - Mutex *lock, journal::ImageClientMeta *client_meta, + ceph::mutex *lock, journal::ImageClientMeta *client_meta, uint64_t *tag_tid, journal::TagData *tag_data, Context *on_finish) { return new OpenRequest(image_ctx, journaler, lock, client_meta, tag_tid, tag_data, on_finish); } - OpenRequest(ImageCtxT *image_ctx, Journaler *journaler, Mutex *lock, + OpenRequest(ImageCtxT *image_ctx, Journaler *journaler, ceph::mutex *lock, journal::ImageClientMeta *client_meta, uint64_t *tag_tid, journal::TagData 
*tag_data, Context *on_finish); @@ -59,7 +59,7 @@ private: ImageCtxT *m_image_ctx; Journaler *m_journaler; - Mutex *m_lock; + ceph::mutex *m_lock; journal::ImageClientMeta *m_client_meta; uint64_t *m_tag_tid; journal::TagData *m_tag_data; diff --git a/src/librbd/journal/PromoteRequest.cc b/src/librbd/journal/PromoteRequest.cc index 22dc83a32d86..703a8a075bc0 100644 --- a/src/librbd/journal/PromoteRequest.cc +++ b/src/librbd/journal/PromoteRequest.cc @@ -26,7 +26,7 @@ using librbd::util::create_context_callback; template PromoteRequest::PromoteRequest(I *image_ctx, bool force, Context *on_finish) : m_image_ctx(image_ctx), m_force(force), m_on_finish(on_finish), - m_lock("PromoteRequest::m_lock") { + m_lock(ceph::make_mutex("PromoteRequest::m_lock")) { } template diff --git a/src/librbd/journal/PromoteRequest.h b/src/librbd/journal/PromoteRequest.h index 0d01f596108d..f6258066e7e0 100644 --- a/src/librbd/journal/PromoteRequest.h +++ b/src/librbd/journal/PromoteRequest.h @@ -5,7 +5,7 @@ #define CEPH_LIBRBD_JOURNAL_PROMOTE_REQUEST_H #include "include/int_types.h" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "cls/journal/cls_journal_types.h" #include "journal/Future.h" #include "librbd/journal/Types.h" @@ -71,7 +71,7 @@ private: Journaler *m_journaler = nullptr; int m_ret_val = 0; - Mutex m_lock; + ceph::mutex m_lock; ImageClientMeta m_client_meta; uint64_t m_tag_tid = 0; TagData m_tag_data; diff --git a/src/librbd/journal/RemoveRequest.h b/src/librbd/journal/RemoveRequest.h index 594e62d5eb89..13dff87a08e7 100644 --- a/src/librbd/journal/RemoveRequest.h +++ b/src/librbd/journal/RemoveRequest.h @@ -8,7 +8,6 @@ #include "include/buffer.h" #include "include/rados/librados.hpp" #include "include/rbd/librbd.hpp" -#include "common/Mutex.h" #include "librbd/ImageCtx.h" #include "journal/Journaler.h" #include "librbd/journal/TypeTraits.h" @@ -58,7 +57,7 @@ private: CephContext *m_cct; Journaler *m_journaler; SafeTimer *m_timer; - Mutex *m_timer_lock; + 
ceph::mutex *m_timer_lock; int m_r_saved; void stat_journal(); diff --git a/src/librbd/journal/Replay.cc b/src/librbd/journal/Replay.cc index 8f850ab5369a..d2d717545590 100644 --- a/src/librbd/journal/Replay.cc +++ b/src/librbd/journal/Replay.cc @@ -118,7 +118,7 @@ struct ExecuteOp : public Context { } ldout(cct, 20) << ": ExecuteOp::" << __func__ << dendl; - RWLock::RLocker owner_locker(image_ctx.owner_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; if (image_ctx.exclusive_lock == nullptr || !image_ctx.exclusive_lock->accept_ops()) { @@ -173,7 +173,7 @@ struct C_RefreshIfRequired : public Context { template Replay::Replay(I &image_ctx) - : m_image_ctx(image_ctx), m_lock("Replay::m_lock") { + : m_image_ctx(image_ctx) { } template @@ -206,7 +206,7 @@ void Replay::process(const EventEntry &event_entry, on_ready = util::create_async_context_callback(m_image_ctx, on_ready); - RWLock::RLocker owner_lock(m_image_ctx.owner_lock); + std::shared_lock owner_lock{m_image_ctx.owner_lock}; if (m_image_ctx.exclusive_lock == nullptr || !m_image_ctx.exclusive_lock->accept_ops()) { ldout(cct, 5) << ": lost exclusive lock -- skipping event" << dendl; @@ -229,7 +229,7 @@ void Replay::shut_down(bool cancel_ops, Context *on_finish) { m_image_ctx, on_finish); { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; // safely commit any remaining AIO modify operations if ((m_in_flight_aio_flush + m_in_flight_aio_modify) != 0) { @@ -270,7 +270,7 @@ void Replay::shut_down(bool cancel_ops, Context *on_finish) { // execute the following outside of lock scope if (flush_comp != nullptr) { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; io::ImageRequest::aio_flush(&m_image_ctx, flush_comp, io::FLUSH_SOURCE_INTERNAL, {}); } @@ -283,7 +283,7 @@ template void Replay::flush(Context *on_finish) { io::AioCompletion *aio_comp; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; aio_comp = 
create_aio_flush_completion( util::create_async_context_callback(m_image_ctx, on_finish)); if (aio_comp == nullptr) { @@ -291,7 +291,7 @@ void Replay::flush(Context *on_finish) { } } - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; io::ImageRequest::aio_flush(&m_image_ctx, aio_comp, io::FLUSH_SOURCE_INTERNAL, {}); } @@ -301,7 +301,7 @@ void Replay::replay_op_ready(uint64_t op_tid, Context *on_resume) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": op_tid=" << op_tid << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; auto op_it = m_op_events.find(op_tid); ceph_assert(op_it != m_op_events.end()); @@ -357,9 +357,9 @@ void Replay::handle_event(const journal::AioDiscardEvent &event, } if (flush_required) { - m_lock.Lock(); + m_lock.lock(); auto flush_comp = create_aio_flush_completion(nullptr); - m_lock.Unlock(); + m_lock.unlock(); if (flush_comp != nullptr) { io::ImageRequest::aio_flush(&m_image_ctx, flush_comp, @@ -391,9 +391,9 @@ void Replay::handle_event(const journal::AioWriteEvent &event, } if (flush_required) { - m_lock.Lock(); + m_lock.lock(); auto flush_comp = create_aio_flush_completion(nullptr); - m_lock.Unlock(); + m_lock.unlock(); if (flush_comp != nullptr) { io::ImageRequest::aio_flush(&m_image_ctx, flush_comp, @@ -410,7 +410,7 @@ void Replay::handle_event(const journal::AioFlushEvent &event, io::AioCompletion *aio_comp; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; aio_comp = create_aio_flush_completion(on_safe); } @@ -444,9 +444,9 @@ void Replay::handle_event(const journal::AioWriteSameEvent &event, } if (flush_required) { - m_lock.Lock(); + m_lock.lock(); auto flush_comp = create_aio_flush_completion(nullptr); - m_lock.Unlock(); + m_lock.unlock(); if (flush_comp != nullptr) { io::ImageRequest::aio_flush(&m_image_ctx, flush_comp, @@ -478,9 +478,9 @@ void Replay::handle_event(const journal::AioWriteSameEvent &event, } if 
(flush_required) { - m_lock.Lock(); + m_lock.lock(); auto flush_comp = create_aio_flush_completion(nullptr); - m_lock.Unlock(); + m_lock.unlock(); io::ImageRequest::aio_flush(&m_image_ctx, flush_comp, io::FLUSH_SOURCE_INTERNAL, {}); @@ -499,7 +499,7 @@ void Replay::handle_event(const journal::OpFinishEvent &event, Context *on_op_complete = nullptr; Context *on_op_finish_event = nullptr; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; auto op_it = m_op_events.find(event.op_tid); if (op_it == m_op_events.end()) { ldout(cct, 10) << ": unable to locate associated op: assuming previously " @@ -548,7 +548,7 @@ void Replay::handle_event(const journal::SnapCreateEvent &event, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": Snap create event" << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; OpEvent *op_event; Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready, on_safe, &op_event); @@ -577,7 +577,7 @@ void Replay::handle_event(const journal::SnapRemoveEvent &event, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": Snap remove event" << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; OpEvent *op_event; Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready, on_safe, &op_event); @@ -601,7 +601,7 @@ void Replay::handle_event(const journal::SnapRenameEvent &event, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": Snap rename event" << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; OpEvent *op_event; Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready, on_safe, &op_event); @@ -625,7 +625,7 @@ void Replay::handle_event(const journal::SnapProtectEvent &event, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": Snap protect event" << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; OpEvent *op_event; Context *on_op_complete = create_op_context_callback(event.op_tid, 
on_ready, on_safe, &op_event); @@ -649,7 +649,7 @@ void Replay::handle_event(const journal::SnapUnprotectEvent &event, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": Snap unprotect event" << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; OpEvent *op_event; Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready, on_safe, &op_event); @@ -677,7 +677,7 @@ void Replay::handle_event(const journal::SnapRollbackEvent &event, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": Snap rollback start event" << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; OpEvent *op_event; Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready, on_safe, &op_event); @@ -699,7 +699,7 @@ void Replay::handle_event(const journal::RenameEvent &event, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": Rename event" << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; OpEvent *op_event; Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready, on_safe, &op_event); @@ -723,7 +723,7 @@ void Replay::handle_event(const journal::ResizeEvent &event, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": Resize start event" << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; OpEvent *op_event; Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready, on_safe, &op_event); @@ -748,7 +748,7 @@ void Replay::handle_event(const journal::FlattenEvent &event, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": Flatten start event" << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; OpEvent *op_event; Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready, on_safe, &op_event); @@ -781,7 +781,7 @@ void Replay::handle_event(const journal::SnapLimitEvent &event, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": Snap limit event" << dendl; - Mutex::Locker 
locker(m_lock); + std::lock_guard locker{m_lock}; OpEvent *op_event; Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready, on_safe, &op_event); @@ -805,7 +805,7 @@ void Replay::handle_event(const journal::UpdateFeaturesEvent &event, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": Update features event" << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; OpEvent *op_event; Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready, on_safe, &op_event); @@ -830,7 +830,7 @@ void Replay::handle_event(const journal::MetadataSetEvent &event, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": Metadata set event" << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; OpEvent *op_event; Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready, on_safe, &op_event); @@ -852,7 +852,7 @@ void Replay::handle_event(const journal::MetadataRemoveEvent &event, CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": Metadata remove event" << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; OpEvent *op_event; Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready, on_safe, &op_event); @@ -883,7 +883,7 @@ void Replay::handle_event(const journal::UnknownEvent &event, template void Replay::handle_aio_modify_complete(Context *on_ready, Context *on_safe, int r, std::set &filters) { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << ": on_ready=" << on_ready << ", " << "on_safe=" << on_safe << ", r=" << r << dendl; @@ -918,7 +918,7 @@ void Replay::handle_aio_flush_complete(Context *on_flush_safe, Context *on_aio_ready = nullptr; Context *on_flush = nullptr; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_in_flight_aio_flush > 0); ceph_assert(m_in_flight_aio_modify >= on_safe_ctxs.size()); --m_in_flight_aio_flush; @@ -972,7 
+972,7 @@ Context *Replay::create_op_context_callback(uint64_t op_tid, return nullptr; } - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); if (m_op_events.count(op_tid) != 0) { lderr(cct) << ": duplicate op tid detected: " << op_tid << dendl; @@ -1001,7 +1001,7 @@ void Replay::handle_op_complete(uint64_t op_tid, int r) { OpEvent op_event; bool shutting_down = false; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; auto op_it = m_op_events.find(op_tid); ceph_assert(op_it != m_op_events.end()); @@ -1049,7 +1049,7 @@ void Replay::handle_op_complete(uint64_t op_tid, int r) { // dropped -- handle if pending Context *on_flush = nullptr; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_in_flight_op_events > 0); --m_in_flight_op_events; if (m_in_flight_op_events == 0 && @@ -1069,7 +1069,7 @@ Replay::create_aio_modify_completion(Context *on_ready, io::aio_type_t aio_type, bool *flush_required, std::set &&filters) { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; CephContext *cct = m_image_ctx.cct; ceph_assert(m_on_aio_ready == nullptr); @@ -1117,7 +1117,7 @@ Replay::create_aio_modify_completion(Context *on_ready, template io::AioCompletion *Replay::create_aio_flush_completion(Context *on_safe) { - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); CephContext *cct = m_image_ctx.cct; if (m_shut_down) { @@ -1143,9 +1143,9 @@ template bool Replay::clipped_io(uint64_t image_offset, io::AioCompletion *aio_comp) { CephContext *cct = m_image_ctx.cct; - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); size_t image_size = m_image_ctx.size; - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); if (image_offset >= image_size) { // rbd-mirror image sync might race an IO event w/ associated resize between diff --git a/src/librbd/journal/Replay.h b/src/librbd/journal/Replay.h index 6e058ddb35b7..038601833cd9 
100644 --- a/src/librbd/journal/Replay.h +++ b/src/librbd/journal/Replay.h @@ -7,7 +7,7 @@ #include "include/int_types.h" #include "include/buffer_fwd.h" #include "include/Context.h" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "librbd/io/Types.h" #include "librbd/journal/Types.h" #include @@ -119,7 +119,7 @@ private: ImageCtxT &m_image_ctx; - Mutex m_lock; + ceph::mutex m_lock = ceph::make_mutex("Replay::m_lock"); uint64_t m_in_flight_aio_flush = 0; uint64_t m_in_flight_aio_modify = 0; diff --git a/src/librbd/journal/ResetRequest.h b/src/librbd/journal/ResetRequest.h index 9cc8e2e437c1..44f5ac8a6bd7 100644 --- a/src/librbd/journal/ResetRequest.h +++ b/src/librbd/journal/ResetRequest.h @@ -8,7 +8,6 @@ #include "include/buffer.h" #include "include/rados/librados.hpp" #include "include/rbd/librbd.hpp" -#include "common/Mutex.h" #include "librbd/journal/TypeTraits.h" #include diff --git a/src/librbd/journal/Utils.cc b/src/librbd/journal/Utils.cc index 1721a9b2c941..231bcae2d320 100644 --- a/src/librbd/journal/Utils.cc +++ b/src/librbd/journal/Utils.cc @@ -32,7 +32,7 @@ int C_DecodeTag::process(int r) { return r; } - Mutex::Locker locker(*lock); + std::lock_guard locker{*lock}; *tag_tid = tag.tid; auto data_it = tag.data.cbegin(); @@ -64,7 +64,7 @@ int C_DecodeTags::process(int r) { return -ENOENT; } - Mutex::Locker locker(*lock); + std::lock_guard locker{*lock}; *tag_tid = tags.back().tid; auto data_it = tags.back().data.cbegin(); r = C_DecodeTag::decode(&data_it, tag_data); diff --git a/src/librbd/journal/Utils.h b/src/librbd/journal/Utils.h index 63d37c03726e..d22044d10375 100644 --- a/src/librbd/journal/Utils.h +++ b/src/librbd/journal/Utils.h @@ -10,7 +10,6 @@ #include struct CephContext; -struct Mutex; namespace librbd { namespace journal { @@ -21,14 +20,14 @@ namespace util { struct C_DecodeTag : public Context { CephContext *cct; - Mutex *lock; + ceph::mutex *lock; uint64_t *tag_tid; TagData *tag_data; Context *on_finish; 
cls::journal::Tag tag; - C_DecodeTag(CephContext *cct, Mutex *lock, uint64_t *tag_tid, + C_DecodeTag(CephContext *cct, ceph::mutex *lock, uint64_t *tag_tid, TagData *tag_data, Context *on_finish) : cct(cct), lock(lock), tag_tid(tag_tid), tag_data(tag_data), on_finish(on_finish) { @@ -51,14 +50,14 @@ struct C_DecodeTags : public Context { typedef std::list Tags; CephContext *cct; - Mutex *lock; + ceph::mutex *lock; uint64_t *tag_tid; TagData *tag_data; Context *on_finish; Tags tags; - C_DecodeTags(CephContext *cct, Mutex *lock, uint64_t *tag_tid, + C_DecodeTags(CephContext *cct, ceph::mutex *lock, uint64_t *tag_tid, TagData *tag_data, Context *on_finish) : cct(cct), lock(lock), tag_tid(tag_tid), tag_data(tag_data), on_finish(on_finish) { diff --git a/src/librbd/librbd.cc b/src/librbd/librbd.cc index 47f5cd5a7cc8..6c303c963fa4 100644 --- a/src/librbd/librbd.cc +++ b/src/librbd/librbd.cc @@ -1430,7 +1430,7 @@ namespace librbd { tracepoint(librbd, get_access_timestamp_enter, ictx, ictx->name.c_str(), ictx->read_only); { - RWLock::RLocker timestamp_locker(ictx->timestamp_lock); + std::shared_lock timestamp_locker{ictx->timestamp_lock}; utime_t time = ictx->get_access_timestamp(); time.to_timespec(timestamp); } @@ -1444,7 +1444,7 @@ namespace librbd { tracepoint(librbd, get_modify_timestamp_enter, ictx, ictx->name.c_str(), ictx->read_only); { - RWLock::RLocker timestamp_locker(ictx->timestamp_lock); + std::shared_lock timestamp_locker{ictx->timestamp_lock}; utime_t time = ictx->get_modify_timestamp(); time.to_timespec(timestamp); } diff --git a/src/librbd/mirror/DemoteRequest.cc b/src/librbd/mirror/DemoteRequest.cc index c5d38752ea92..0e194dbfb173 100644 --- a/src/librbd/mirror/DemoteRequest.cc +++ b/src/librbd/mirror/DemoteRequest.cc @@ -66,9 +66,9 @@ template void DemoteRequest::acquire_lock() { CephContext *cct = m_image_ctx.cct; - m_image_ctx.owner_lock.get_read(); + m_image_ctx.owner_lock.lock_shared(); if (m_image_ctx.exclusive_lock == nullptr) { - 
m_image_ctx.owner_lock.put_read(); + m_image_ctx.owner_lock.unlock_shared(); lderr(cct) << "exclusive lock is not active" << dendl; finish(-EINVAL); return; @@ -80,7 +80,7 @@ void DemoteRequest::acquire_lock() { m_blocked_requests = true; if (m_image_ctx.exclusive_lock->is_lock_owner()) { - m_image_ctx.owner_lock.put_read(); + m_image_ctx.owner_lock.unlock_shared(); demote(); return; } @@ -90,7 +90,7 @@ void DemoteRequest::acquire_lock() { auto ctx = create_context_callback< DemoteRequest, &DemoteRequest::handle_acquire_lock>(this); m_image_ctx.exclusive_lock->acquire_lock(ctx); - m_image_ctx.owner_lock.put_read(); + m_image_ctx.owner_lock.unlock_shared(); } template @@ -104,16 +104,16 @@ void DemoteRequest::handle_acquire_lock(int r) { return; } - m_image_ctx.owner_lock.get_read(); + m_image_ctx.owner_lock.lock_shared(); if (m_image_ctx.exclusive_lock != nullptr && !m_image_ctx.exclusive_lock->is_lock_owner()) { r = m_image_ctx.exclusive_lock->get_unlocked_op_error(); - m_image_ctx.owner_lock.put_read(); + m_image_ctx.owner_lock.unlock_shared(); lderr(cct) << "failed to acquire exclusive lock" << dendl; finish(r); return; } - m_image_ctx.owner_lock.put_read(); + m_image_ctx.owner_lock.unlock_shared(); demote(); } @@ -146,9 +146,9 @@ void DemoteRequest::release_lock() { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << dendl; - m_image_ctx.owner_lock.get_read(); + m_image_ctx.owner_lock.lock_shared(); if (m_image_ctx.exclusive_lock == nullptr) { - m_image_ctx.owner_lock.put_read(); + m_image_ctx.owner_lock.unlock_shared(); finish(0); return; } @@ -156,7 +156,7 @@ void DemoteRequest::release_lock() { auto ctx = create_context_callback< DemoteRequest, &DemoteRequest::handle_release_lock>(this); m_image_ctx.exclusive_lock->release_lock(ctx); - m_image_ctx.owner_lock.put_read(); + m_image_ctx.owner_lock.unlock_shared(); } template @@ -179,7 +179,7 @@ void DemoteRequest::finish(int r) { } { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock 
owner_locker{m_image_ctx.owner_lock}; if (m_blocked_requests && m_image_ctx.exclusive_lock != nullptr) { m_image_ctx.exclusive_lock->unblock_requests(); } diff --git a/src/librbd/mirror/DisableRequest.cc b/src/librbd/mirror/DisableRequest.cc index 6a21c5608d7a..ad9a525c9bd2 100644 --- a/src/librbd/mirror/DisableRequest.cc +++ b/src/librbd/mirror/DisableRequest.cc @@ -28,7 +28,7 @@ template DisableRequest::DisableRequest(I *image_ctx, bool force, bool remove, Context *on_finish) : m_image_ctx(image_ctx), m_force(force), m_remove(remove), - m_on_finish(on_finish), m_lock("mirror::DisableRequest::m_lock") { + m_on_finish(on_finish) { } template @@ -239,7 +239,7 @@ Context *DisableRequest::handle_get_clients(int *result) { return m_on_finish; } - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_current_ops.empty()); @@ -306,7 +306,7 @@ void DisableRequest::send_remove_snap(const std::string &client_id, ldout(cct, 10) << this << " " << __func__ << ": client_id=" << client_id << ", snap_name=" << snap_name << dendl; - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); m_current_ops[client_id]++; @@ -328,7 +328,7 @@ Context *DisableRequest::handle_remove_snap(int *result, CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_current_ops[client_id] > 0); m_current_ops[client_id]--; @@ -353,7 +353,7 @@ void DisableRequest::send_unregister_client( CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << dendl; - ceph_assert(m_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_lock)); ceph_assert(m_current_ops[client_id] == 0); Context *ctx = create_context_callback( @@ -381,7 +381,7 @@ Context *DisableRequest::handle_unregister_client( CephContext *cct = m_image_ctx->cct; ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl; - 
Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_current_ops[client_id] == 0); m_current_ops.erase(client_id); diff --git a/src/librbd/mirror/DisableRequest.h b/src/librbd/mirror/DisableRequest.h index 1a3b122339c8..a3eeee786764 100644 --- a/src/librbd/mirror/DisableRequest.h +++ b/src/librbd/mirror/DisableRequest.h @@ -5,7 +5,7 @@ #define CEPH_LIBRBD_MIRROR_DISABLE_REQUEST_H #include "include/buffer.h" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "cls/journal/cls_journal_types.h" #include "cls/rbd/cls_rbd_types.h" #include @@ -92,7 +92,8 @@ private: std::map m_ret; std::map m_current_ops; int m_error_result = 0; - mutable Mutex m_lock; + mutable ceph::mutex m_lock = + ceph::make_mutex("mirror::DisableRequest::m_lock"); void send_get_mirror_image(); Context *handle_get_mirror_image(int *result); diff --git a/src/librbd/object_map/CreateRequest.cc b/src/librbd/object_map/CreateRequest.cc index 6576fb2c2e4c..d26f929fa7be 100644 --- a/src/librbd/object_map/CreateRequest.cc +++ b/src/librbd/object_map/CreateRequest.cc @@ -33,7 +33,7 @@ void CreateRequest::send() { uint64_t max_size = m_image_ctx->size; { - RWLock::WLocker image_locker(m_image_ctx->image_lock); + std::unique_lock image_locker{m_image_ctx->image_lock}; m_snap_ids.push_back(CEPH_NOSNAP); for (auto it : m_image_ctx->snap_info) { max_size = std::max(max_size, it.second.size); diff --git a/src/librbd/object_map/CreateRequest.h b/src/librbd/object_map/CreateRequest.h index 6929abe7aad0..33984cda1b1e 100644 --- a/src/librbd/object_map/CreateRequest.h +++ b/src/librbd/object_map/CreateRequest.h @@ -5,7 +5,6 @@ #define CEPH_LIBRBD_OBJECT_MAP_CREATE_REQUEST_H #include "include/buffer.h" -#include "common/Mutex.h" #include #include diff --git a/src/librbd/object_map/InvalidateRequest.cc b/src/librbd/object_map/InvalidateRequest.cc index c610e2b0f068..bf2db9660981 100644 --- a/src/librbd/object_map/InvalidateRequest.cc +++ 
b/src/librbd/object_map/InvalidateRequest.cc @@ -23,8 +23,8 @@ InvalidateRequest* InvalidateRequest::create(I &image_ctx, template void InvalidateRequest::send() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); - ceph_assert(image_ctx.image_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); + ceph_assert(ceph_mutex_is_wlocked(image_ctx.image_lock)); uint64_t snap_flags; int r = image_ctx.get_flags(m_snap_id, &snap_flags); diff --git a/src/librbd/object_map/RefreshRequest.cc b/src/librbd/object_map/RefreshRequest.cc index e061312e9384..1af868d28734 100644 --- a/src/librbd/object_map/RefreshRequest.cc +++ b/src/librbd/object_map/RefreshRequest.cc @@ -25,7 +25,7 @@ using util::create_rados_callback; namespace object_map { template -RefreshRequest::RefreshRequest(I &image_ctx, RWLock* object_map_lock, +RefreshRequest::RefreshRequest(I &image_ctx, ceph::shared_mutex* object_map_lock, ceph::BitVector<2> *object_map, uint64_t snap_id, Context *on_finish) : m_image_ctx(image_ctx), m_object_map_lock(object_map_lock), @@ -36,7 +36,7 @@ RefreshRequest::RefreshRequest(I &image_ctx, RWLock* object_map_lock, template void RefreshRequest::send() { { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; m_object_count = Striper::get_num_objects( m_image_ctx.layout, m_image_ctx.get_image_size(m_snap_id)); } @@ -52,13 +52,13 @@ template void RefreshRequest::apply() { uint64_t num_objs; { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; num_objs = Striper::get_num_objects( m_image_ctx.layout, m_image_ctx.get_image_size(m_snap_id)); } ceph_assert(m_on_disk_object_map.size() >= num_objs); - RWLock::WLocker object_map_locker(*m_object_map_lock); + std::unique_lock object_map_locker{*m_object_map_lock}; *m_object_map = m_on_disk_object_map; } @@ -173,8 +173,8 @@ void RefreshRequest::send_invalidate() { 
InvalidateRequest *req = InvalidateRequest::create( m_image_ctx, m_snap_id, true, ctx); - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; + std::unique_lock image_locker{m_image_ctx.image_lock}; req->send(); } @@ -207,8 +207,8 @@ void RefreshRequest::send_resize_invalidate() { InvalidateRequest *req = InvalidateRequest::create( m_image_ctx, m_snap_id, true, ctx); - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; + std::unique_lock image_locker{m_image_ctx.image_lock}; req->send(); } @@ -278,8 +278,8 @@ void RefreshRequest::send_invalidate_and_close() { m_image_ctx, m_snap_id, false, ctx); lderr(cct) << "object map too large: " << m_object_count << dendl; - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; + std::unique_lock image_locker{m_image_ctx.image_lock}; req->send(); } @@ -295,7 +295,7 @@ Context *RefreshRequest::handle_invalidate_and_close(int *ret_val) { *ret_val = -EFBIG; } - RWLock::WLocker object_map_locker(*m_object_map_lock); + std::unique_lock object_map_locker{*m_object_map_lock}; m_object_map->clear(); return m_on_finish; } diff --git a/src/librbd/object_map/RefreshRequest.h b/src/librbd/object_map/RefreshRequest.h index 3e83dd3eee94..0bca85079d3c 100644 --- a/src/librbd/object_map/RefreshRequest.h +++ b/src/librbd/object_map/RefreshRequest.h @@ -7,6 +7,7 @@ #include "include/int_types.h" #include "include/buffer.h" #include "common/bit_vector.hpp" +#include "common/ceph_mutex.h" class Context; class RWLock; @@ -20,14 +21,15 @@ namespace object_map { template class RefreshRequest { public: - static RefreshRequest *create(ImageCtxT &image_ctx, RWLock* object_map_lock, + static RefreshRequest 
*create(ImageCtxT &image_ctx, + ceph::shared_mutex* object_map_lock, ceph::BitVector<2> *object_map, uint64_t snap_id, Context *on_finish) { return new RefreshRequest(image_ctx, object_map_lock, object_map, snap_id, on_finish); } - RefreshRequest(ImageCtxT &image_ctx, RWLock* object_map_lock, + RefreshRequest(ImageCtxT &image_ctx, ceph::shared_mutex* object_map_lock, ceph::BitVector<2> *object_map, uint64_t snap_id, Context *on_finish); @@ -61,7 +63,7 @@ private: */ ImageCtxT &m_image_ctx; - RWLock* m_object_map_lock; + ceph::shared_mutex* m_object_map_lock; ceph::BitVector<2> *m_object_map; uint64_t m_snap_id; Context *m_on_finish; diff --git a/src/librbd/object_map/RemoveRequest.cc b/src/librbd/object_map/RemoveRequest.cc index bfd80f51d8b2..a718d81fc501 100644 --- a/src/librbd/object_map/RemoveRequest.cc +++ b/src/librbd/object_map/RemoveRequest.cc @@ -21,8 +21,7 @@ using util::create_rados_callback; template RemoveRequest::RemoveRequest(I *image_ctx, Context *on_finish) - : m_image_ctx(image_ctx), m_on_finish(on_finish), - m_lock("object_map::RemoveRequest::m_lock") { + : m_image_ctx(image_ctx), m_on_finish(on_finish) { } template @@ -35,14 +34,14 @@ void RemoveRequest::send_remove_object_map() { CephContext *cct = m_image_ctx->cct; ldout(cct, 20) << __func__ << dendl; - RWLock::WLocker image_locker(m_image_ctx->image_lock); + std::unique_lock image_locker{m_image_ctx->image_lock}; std::vector snap_ids; snap_ids.push_back(CEPH_NOSNAP); for (auto it : m_image_ctx->snap_info) { snap_ids.push_back(it.first); } - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_ref_counter == 0); for (auto snap_id : snap_ids) { @@ -64,7 +63,7 @@ Context *RemoveRequest::handle_remove_object_map(int *result) { ldout(cct, 20) << __func__ << ": r=" << *result << dendl; { - Mutex::Locker locker(m_lock); + std::lock_guard locker{m_lock}; ceph_assert(m_ref_counter > 0); m_ref_counter--; diff --git a/src/librbd/object_map/RemoveRequest.h 
b/src/librbd/object_map/RemoveRequest.h index 1353ef9b7042..ce82e603c9ca 100644 --- a/src/librbd/object_map/RemoveRequest.h +++ b/src/librbd/object_map/RemoveRequest.h @@ -5,7 +5,7 @@ #define CEPH_LIBRBD_OBJECT_MAP_REMOVE_REQUEST_H #include "include/buffer.h" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include #include @@ -48,7 +48,8 @@ private: int m_error_result = 0; int m_ref_counter = 0; - mutable Mutex m_lock; + mutable ceph::mutex m_lock = + ceph::make_mutex("object_map::RemoveRequest::m_lock"); void send_remove_object_map(); Context *handle_remove_object_map(int *result); diff --git a/src/librbd/object_map/Request.cc b/src/librbd/object_map/Request.cc index 869027229230..e323251878af 100644 --- a/src/librbd/object_map/Request.cc +++ b/src/librbd/object_map/Request.cc @@ -57,8 +57,8 @@ bool Request::invalidate() { m_state = STATE_INVALIDATE; - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; + std::unique_lock image_locker{m_image_ctx.image_lock}; InvalidateRequest<> *req = new InvalidateRequest<>(m_image_ctx, m_snap_id, true, create_callback_context()); diff --git a/src/librbd/object_map/ResizeRequest.cc b/src/librbd/object_map/ResizeRequest.cc index 8f0f1da51c09..89e39b670d8a 100644 --- a/src/librbd/object_map/ResizeRequest.cc +++ b/src/librbd/object_map/ResizeRequest.cc @@ -32,7 +32,7 @@ void ResizeRequest::resize(ceph::BitVector<2> *object_map, uint64_t num_objs, void ResizeRequest::send() { CephContext *cct = m_image_ctx.cct; - RWLock::WLocker l(*m_object_map_lock); + std::unique_lock l{*m_object_map_lock}; m_num_objs = Striper::get_num_objects(m_image_ctx.layout, m_new_size); std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id)); @@ -57,7 +57,7 @@ void ResizeRequest::finish_request() { ldout(cct, 5) << this << " resizing in-memory object map: " << m_num_objs << dendl; - RWLock::WLocker 
object_map_locker(*m_object_map_lock); + std::unique_lock object_map_locker{*m_object_map_lock}; resize(m_object_map, m_num_objs, m_default_object_state); } diff --git a/src/librbd/object_map/ResizeRequest.h b/src/librbd/object_map/ResizeRequest.h index eda8f2f097eb..dccdef133cee 100644 --- a/src/librbd/object_map/ResizeRequest.h +++ b/src/librbd/object_map/ResizeRequest.h @@ -19,7 +19,7 @@ namespace object_map { class ResizeRequest : public Request { public: - ResizeRequest(ImageCtx &image_ctx, RWLock *object_map_lock, + ResizeRequest(ImageCtx &image_ctx, ceph::shared_mutex *object_map_lock, ceph::BitVector<2> *object_map, uint64_t snap_id, uint64_t new_size, uint8_t default_object_state, Context *on_finish) @@ -39,7 +39,7 @@ protected: void finish_request() override; private: - RWLock* m_object_map_lock; + ceph::shared_mutex* m_object_map_lock; ceph::BitVector<2> *m_object_map; uint64_t m_num_objs; uint64_t m_new_size; diff --git a/src/librbd/object_map/SnapshotCreateRequest.cc b/src/librbd/object_map/SnapshotCreateRequest.cc index 2421adf9cbe0..729af867fd20 100644 --- a/src/librbd/object_map/SnapshotCreateRequest.cc +++ b/src/librbd/object_map/SnapshotCreateRequest.cc @@ -54,7 +54,7 @@ bool SnapshotCreateRequest::should_complete(int r) { return Request::should_complete(r); } - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; bool finished = false; switch (m_state) { case STATE_READ_MAP: @@ -75,7 +75,7 @@ bool SnapshotCreateRequest::should_complete(int r) { } void SnapshotCreateRequest::send_read_map() { - ceph_assert(m_image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); CephContext *cct = m_image_ctx.cct; std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP)); @@ -110,7 +110,7 @@ void SnapshotCreateRequest::send_write_map() { } bool SnapshotCreateRequest::send_add_snapshot() { - RWLock::RLocker image_locker(m_image_ctx.image_lock); + 
std::shared_lock image_locker{m_image_ctx.image_lock}; if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) == 0) { return true; } @@ -132,7 +132,7 @@ bool SnapshotCreateRequest::send_add_snapshot() { } void SnapshotCreateRequest::update_object_map() { - RWLock::WLocker object_map_locker(*m_object_map_lock); + std::unique_lock object_map_locker{*m_object_map_lock}; auto it = m_object_map.begin(); auto end_it = m_object_map.end(); diff --git a/src/librbd/object_map/SnapshotCreateRequest.h b/src/librbd/object_map/SnapshotCreateRequest.h index 757833acf068..3074d059d577 100644 --- a/src/librbd/object_map/SnapshotCreateRequest.h +++ b/src/librbd/object_map/SnapshotCreateRequest.h @@ -45,7 +45,7 @@ public: STATE_ADD_SNAPSHOT }; - SnapshotCreateRequest(ImageCtx &image_ctx, RWLock* object_map_lock, + SnapshotCreateRequest(ImageCtx &image_ctx, ceph::shared_mutex* object_map_lock, ceph::BitVector<2> *object_map, uint64_t snap_id, Context *on_finish) : Request(image_ctx, snap_id, on_finish), @@ -59,7 +59,7 @@ protected: bool should_complete(int r) override; private: - RWLock* m_object_map_lock; + ceph::shared_mutex* m_object_map_lock; ceph::BitVector<2> &m_object_map; State m_state = STATE_READ_MAP; diff --git a/src/librbd/object_map/SnapshotRemoveRequest.cc b/src/librbd/object_map/SnapshotRemoveRequest.cc index 42d3ca7ab683..a29d78f74c72 100644 --- a/src/librbd/object_map/SnapshotRemoveRequest.cc +++ b/src/librbd/object_map/SnapshotRemoveRequest.cc @@ -18,8 +18,8 @@ namespace librbd { namespace object_map { void SnapshotRemoveRequest::send() { - ceph_assert(m_image_ctx.owner_lock.is_locked()); - ceph_assert(m_image_ctx.image_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); + ceph_assert(ceph_mutex_is_wlocked(m_image_ctx.image_lock)); if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) { int r = m_image_ctx.get_flags(m_snap_id, &m_flags); @@ -66,8 +66,8 @@ void SnapshotRemoveRequest::handle_load_map(int r) { lderr(cct) << "failed to load 
object map " << oid << ": " << cpp_strerror(r) << dendl; - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; + std::unique_lock image_locker{m_image_ctx.image_lock}; invalidate_next_map(); return; } @@ -79,8 +79,8 @@ void SnapshotRemoveRequest::remove_snapshot() { if ((m_flags & RBD_FLAG_OBJECT_MAP_INVALID) != 0) { // snapshot object map exists on disk but is invalid. cannot clean fast-diff // on next snapshot if current snapshot was invalid. - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; + std::unique_lock image_locker{m_image_ctx.image_lock}; invalidate_next_map(); return; } @@ -112,20 +112,20 @@ void SnapshotRemoveRequest::handle_remove_snapshot(int r) { lderr(cct) << "failed to remove object map snapshot " << oid << ": " << cpp_strerror(r) << dendl; - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; + std::unique_lock image_locker{m_image_ctx.image_lock}; invalidate_next_map(); return; } - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; update_object_map(); remove_map(); } void SnapshotRemoveRequest::invalidate_next_map() { - ceph_assert(m_image_ctx.owner_lock.is_locked()); - ceph_assert(m_image_ctx.image_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); + ceph_assert(ceph_mutex_is_wlocked(m_image_ctx.image_lock)); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << dendl; @@ -185,7 +185,7 @@ void SnapshotRemoveRequest::handle_remove_map(int r) { } void SnapshotRemoveRequest::compute_next_snap_id() { - ceph_assert(m_image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); 
m_next_snap_id = CEPH_NOSNAP; std::map::const_iterator it = @@ -199,8 +199,8 @@ void SnapshotRemoveRequest::compute_next_snap_id() { } void SnapshotRemoveRequest::update_object_map() { - assert(m_image_ctx.image_lock.is_locked()); - RWLock::WLocker object_map_locker(*m_object_map_lock); + assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); + std::unique_lock object_map_locker{*m_object_map_lock}; if (m_next_snap_id == m_image_ctx.snap_id && m_next_snap_id == CEPH_NOSNAP) { CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << dendl; diff --git a/src/librbd/object_map/SnapshotRemoveRequest.h b/src/librbd/object_map/SnapshotRemoveRequest.h index 2327fea84181..1e9c75d81d16 100644 --- a/src/librbd/object_map/SnapshotRemoveRequest.h +++ b/src/librbd/object_map/SnapshotRemoveRequest.h @@ -9,8 +9,6 @@ #include "common/bit_vector.hpp" #include "librbd/AsyncRequest.h" -class RWLock; - namespace librbd { namespace object_map { @@ -42,7 +40,7 @@ public: * otherwise, the state machine proceeds to remove the object map. 
*/ - SnapshotRemoveRequest(ImageCtx &image_ctx, RWLock* object_map_lock, + SnapshotRemoveRequest(ImageCtx &image_ctx, ceph::shared_mutex* object_map_lock, ceph::BitVector<2> *object_map, uint64_t snap_id, Context *on_finish) : AsyncRequest(image_ctx, on_finish), @@ -58,7 +56,7 @@ protected: } private: - RWLock* m_object_map_lock; + ceph::shared_mutex* m_object_map_lock; ceph::BitVector<2> &m_object_map; uint64_t m_snap_id; uint64_t m_next_snap_id; diff --git a/src/librbd/object_map/SnapshotRollbackRequest.cc b/src/librbd/object_map/SnapshotRollbackRequest.cc index 3e5d54dc3d44..476a33b2e1ac 100644 --- a/src/librbd/object_map/SnapshotRollbackRequest.cc +++ b/src/librbd/object_map/SnapshotRollbackRequest.cc @@ -94,7 +94,7 @@ void SnapshotRollbackRequest::send_read_map() { } void SnapshotRollbackRequest::send_write_map() { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; CephContext *cct = m_image_ctx.cct; std::string snap_oid(ObjectMap<>::object_map_name(m_image_ctx.id, @@ -114,8 +114,8 @@ void SnapshotRollbackRequest::send_write_map() { } void SnapshotRollbackRequest::send_invalidate_map() { - RWLock::RLocker owner_locker(m_image_ctx.owner_lock); - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::shared_lock owner_locker{m_image_ctx.owner_lock}; + std::unique_lock image_locker{m_image_ctx.image_lock}; CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; diff --git a/src/librbd/object_map/UpdateRequest.cc b/src/librbd/object_map/UpdateRequest.cc index 0275034135c1..53a604bcf248 100644 --- a/src/librbd/object_map/UpdateRequest.cc +++ b/src/librbd/object_map/UpdateRequest.cc @@ -33,8 +33,8 @@ void UpdateRequest::send() { template void UpdateRequest::update_object_map() { - ceph_assert(m_image_ctx.image_lock.is_locked()); - ceph_assert(m_object_map_lock->is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); + 
ceph_assert(ceph_mutex_is_locked(*m_object_map_lock)); CephContext *cct = m_image_ctx.cct; // break very large requests into manageable batches @@ -80,8 +80,8 @@ void UpdateRequest::handle_update_object_map(int r) { } { - RWLock::RLocker image_locker(m_image_ctx.image_lock); - RWLock::WLocker object_map_locker(*m_object_map_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; + std::unique_lock object_map_locker{*m_object_map_lock}; update_in_memory_object_map(); if (m_update_end_object_no < m_end_object_no) { @@ -97,8 +97,8 @@ void UpdateRequest::handle_update_object_map(int r) { template void UpdateRequest::update_in_memory_object_map() { - ceph_assert(m_image_ctx.image_lock.is_locked()); - ceph_assert(m_object_map_lock->is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); + ceph_assert(ceph_mutex_is_locked(*m_object_map_lock)); // rebuilding the object map might update on-disk only if (m_snap_id == m_image_ctx.snap_id) { diff --git a/src/librbd/object_map/UpdateRequest.h b/src/librbd/object_map/UpdateRequest.h index ffaa883da547..b5a72d591f3e 100644 --- a/src/librbd/object_map/UpdateRequest.h +++ b/src/librbd/object_map/UpdateRequest.h @@ -12,7 +12,6 @@ #include class Context; -class RWLock; namespace librbd { @@ -24,7 +23,7 @@ template class UpdateRequest : public Request { public: static UpdateRequest *create(ImageCtx &image_ctx, - RWLock* object_map_lock, + ceph::shared_mutex* object_map_lock, ceph::BitVector<2> *object_map, uint64_t snap_id, uint64_t start_object_no, uint64_t end_object_no, uint8_t new_state, @@ -37,7 +36,7 @@ public: on_finish); } - UpdateRequest(ImageCtx &image_ctx, RWLock* object_map_lock, + UpdateRequest(ImageCtx &image_ctx, ceph::shared_mutex* object_map_lock, ceph::BitVector<2> *object_map, uint64_t snap_id, uint64_t start_object_no, uint64_t end_object_no, uint8_t new_state, @@ -79,7 +78,7 @@ private: * @endverbatim */ - RWLock* m_object_map_lock; + ceph::shared_mutex* m_object_map_lock; 
ceph::BitVector<2> &m_object_map; uint64_t m_start_object_no; uint64_t m_end_object_no; diff --git a/src/librbd/operation/DisableFeaturesRequest.cc b/src/librbd/operation/DisableFeaturesRequest.cc index dc58a989ab95..3dea085aff1a 100644 --- a/src/librbd/operation/DisableFeaturesRequest.cc +++ b/src/librbd/operation/DisableFeaturesRequest.cc @@ -41,7 +41,7 @@ template void DisableFeaturesRequest::send_op() { I &image_ctx = this->m_image_ctx; CephContext *cct = image_ctx.cct; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); ldout(cct, 20) << this << " " << __func__ << ": features=" << m_features << dendl; @@ -94,7 +94,7 @@ void DisableFeaturesRequest::send_block_writes() { CephContext *cct = image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - RWLock::WLocker locker(image_ctx.owner_lock); + std::unique_lock locker{image_ctx.owner_lock}; image_ctx.io_work_queue->block_writes(create_context_callback< DisableFeaturesRequest, &DisableFeaturesRequest::handle_block_writes>(this)); @@ -113,7 +113,7 @@ Context *DisableFeaturesRequest::handle_block_writes(int *result) { m_writes_blocked = true; { - RWLock::WLocker locker(image_ctx.owner_lock); + std::unique_lock locker{image_ctx.owner_lock}; // avoid accepting new requests from peers while we manipulate // the image features if (image_ctx.exclusive_lock != nullptr && @@ -139,7 +139,7 @@ void DisableFeaturesRequest::send_acquire_exclusive_lock() { &DisableFeaturesRequest::handle_acquire_exclusive_lock>(this); { - RWLock::WLocker locker(image_ctx.owner_lock); + std::unique_lock locker{image_ctx.owner_lock}; // if disabling features w/ exclusive lock supported, we need to // acquire the lock to temporarily block IO against the image if (image_ctx.exclusive_lock != nullptr && @@ -160,16 +160,16 @@ Context *DisableFeaturesRequest::handle_acquire_exclusive_lock(int *result) { CephContext *cct = image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << ": 
r=" << *result << dendl; - image_ctx.owner_lock.get_read(); + image_ctx.owner_lock.lock_shared(); if (*result < 0) { lderr(cct) << "failed to lock image: " << cpp_strerror(*result) << dendl; - image_ctx.owner_lock.put_read(); + image_ctx.owner_lock.unlock_shared(); return handle_finish(*result); } else if (image_ctx.exclusive_lock != nullptr && !image_ctx.exclusive_lock->is_lock_owner()) { lderr(cct) << "failed to acquire exclusive lock" << dendl; *result = image_ctx.exclusive_lock->get_unlocked_op_error(); - image_ctx.owner_lock.put_read(); + image_ctx.owner_lock.unlock_shared(); return handle_finish(*result); } @@ -205,7 +205,7 @@ Context *DisableFeaturesRequest::handle_acquire_exclusive_lock(int *result) { m_disable_flags |= RBD_FLAG_OBJECT_MAP_INVALID; } } while (false); - image_ctx.owner_lock.put_read(); + image_ctx.owner_lock.unlock_shared(); if (*result < 0) { return handle_finish(*result); @@ -356,7 +356,7 @@ void DisableFeaturesRequest::send_close_journal() { CephContext *cct = image_ctx.cct; { - RWLock::WLocker locker(image_ctx.owner_lock); + std::unique_lock locker{image_ctx.owner_lock}; if (image_ctx.journal != nullptr) { ldout(cct, 20) << this << " " << __func__ << dendl; @@ -628,7 +628,7 @@ Context *DisableFeaturesRequest::handle_finish(int r) { ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl; { - RWLock::WLocker locker(image_ctx.owner_lock); + std::unique_lock locker{image_ctx.owner_lock}; if (image_ctx.exclusive_lock != nullptr && m_requests_blocked) { image_ctx.exclusive_lock->unblock_requests(); } diff --git a/src/librbd/operation/EnableFeaturesRequest.cc b/src/librbd/operation/EnableFeaturesRequest.cc index e2c1113d0e9f..c01cca7ecdc0 100644 --- a/src/librbd/operation/EnableFeaturesRequest.cc +++ b/src/librbd/operation/EnableFeaturesRequest.cc @@ -38,7 +38,7 @@ template void EnableFeaturesRequest::send_op() { I &image_ctx = this->m_image_ctx; CephContext *cct = image_ctx.cct; - ceph_assert(image_ctx.owner_lock.is_locked()); + 
ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); ldout(cct, 20) << this << " " << __func__ << ": features=" << m_features << dendl; @@ -90,7 +90,7 @@ void EnableFeaturesRequest::send_block_writes() { CephContext *cct = image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - RWLock::WLocker locker(image_ctx.owner_lock); + std::unique_lock locker{image_ctx.owner_lock}; image_ctx.io_work_queue->block_writes(create_context_callback< EnableFeaturesRequest, &EnableFeaturesRequest::handle_block_writes>(this)); @@ -163,7 +163,7 @@ Context *EnableFeaturesRequest::handle_get_mirror_mode(int *result) { bool create_journal = false; do { - RWLock::WLocker locker(image_ctx.owner_lock); + std::unique_lock locker{image_ctx.owner_lock}; // avoid accepting new requests from peers while we manipulate // the image features @@ -469,7 +469,7 @@ Context *EnableFeaturesRequest::handle_finish(int r) { ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl; { - RWLock::WLocker locker(image_ctx.owner_lock); + std::unique_lock locker{image_ctx.owner_lock}; if (image_ctx.exclusive_lock != nullptr && m_requests_blocked) { image_ctx.exclusive_lock->unblock_requests(); diff --git a/src/librbd/operation/FlattenRequest.cc b/src/librbd/operation/FlattenRequest.cc index 59dc8b558cf0..d94e8421083f 100644 --- a/src/librbd/operation/FlattenRequest.cc +++ b/src/librbd/operation/FlattenRequest.cc @@ -36,7 +36,7 @@ public: int send() override { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; if (image_ctx.exclusive_lock != nullptr && @@ -46,7 +46,7 @@ public: } { - RWLock::RLocker image_lock(image_ctx.image_lock); + std::shared_lock image_lock{image_ctx.image_lock}; if (image_ctx.object_map != nullptr && !image_ctx.object_map->object_may_not_exist(m_object_no)) { // can skip because the object already exists @@ -93,12 +93,12 @@ void 
FlattenRequest::send_op() { template void FlattenRequest::flatten_objects() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 5) << dendl; - assert(image_ctx.owner_lock.is_locked()); + assert(ceph_mutex_is_locked(image_ctx.owner_lock)); auto ctx = create_context_callback< FlattenRequest, &FlattenRequest::handle_flatten_objects>(this); @@ -136,22 +136,22 @@ void FlattenRequest::detach_child() { CephContext *cct = image_ctx.cct; // should have been canceled prior to releasing lock - image_ctx.owner_lock.get_read(); + image_ctx.owner_lock.lock_shared(); ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); // if there are no snaps, remove from the children object as well // (if snapshots remain, they have their own parent info, and the child // will be removed when the last snap goes away) - image_ctx.image_lock.get_read(); + image_ctx.image_lock.lock_shared(); if ((image_ctx.features & RBD_FEATURE_DEEP_FLATTEN) == 0 && !image_ctx.snaps.empty()) { - image_ctx.image_lock.put_read(); - image_ctx.owner_lock.put_read(); + image_ctx.image_lock.unlock_shared(); + image_ctx.owner_lock.unlock_shared(); detach_parent(); return; } - image_ctx.image_lock.put_read(); + image_ctx.image_lock.unlock_shared(); ldout(cct, 5) << dendl; auto ctx = create_context_callback< @@ -159,7 +159,7 @@ void FlattenRequest::detach_child() { &FlattenRequest::handle_detach_child>(this); auto req = image::DetachChildRequest::create(image_ctx, ctx); req->send(); - image_ctx.owner_lock.put_read(); + image_ctx.owner_lock.unlock_shared(); } template @@ -184,21 +184,21 @@ void FlattenRequest::detach_parent() { ldout(cct, 5) << dendl; // should have been canceled prior to releasing lock - image_ctx.owner_lock.get_read(); + image_ctx.owner_lock.lock_shared(); ceph_assert(image_ctx.exclusive_lock == nullptr || 
image_ctx.exclusive_lock->is_lock_owner()); // stop early if the parent went away - it just means // another flatten finished first, so this one is useless. - image_ctx.image_lock.get_read(); + image_ctx.image_lock.lock_shared(); if (!image_ctx.parent) { ldout(cct, 5) << "image already flattened" << dendl; - image_ctx.image_lock.put_read(); - image_ctx.owner_lock.put_read(); + image_ctx.image_lock.unlock_shared(); + image_ctx.owner_lock.unlock_shared(); this->complete(0); return; } - image_ctx.image_lock.put_read(); + image_ctx.image_lock.unlock_shared(); // remove parent from this (base) image auto ctx = create_context_callback< @@ -206,7 +206,7 @@ void FlattenRequest::detach_parent() { &FlattenRequest::handle_detach_parent>(this); auto req = image::DetachParentRequest::create(image_ctx, ctx); req->send(); - image_ctx.owner_lock.put_read(); + image_ctx.owner_lock.unlock_shared(); } template diff --git a/src/librbd/operation/MetadataRemoveRequest.cc b/src/librbd/operation/MetadataRemoveRequest.cc index 828e7a5b6fd5..c5d6141adb88 100644 --- a/src/librbd/operation/MetadataRemoveRequest.cc +++ b/src/librbd/operation/MetadataRemoveRequest.cc @@ -40,7 +40,7 @@ bool MetadataRemoveRequest::should_complete(int r) { template void MetadataRemoveRequest::send_metadata_remove() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; diff --git a/src/librbd/operation/MetadataSetRequest.cc b/src/librbd/operation/MetadataSetRequest.cc index 760e9b1e32e4..5fb939352e2f 100644 --- a/src/librbd/operation/MetadataSetRequest.cc +++ b/src/librbd/operation/MetadataSetRequest.cc @@ -41,7 +41,7 @@ bool MetadataSetRequest::should_complete(int r) { template void MetadataSetRequest::send_metadata_set() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + 
ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; diff --git a/src/librbd/operation/MigrateRequest.cc b/src/librbd/operation/MigrateRequest.cc index 9f698f7e1aa2..2bb1207007b6 100644 --- a/src/librbd/operation/MigrateRequest.cc +++ b/src/librbd/operation/MigrateRequest.cc @@ -40,7 +40,7 @@ public: int send() override { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; if (image_ctx.exclusive_lock != nullptr && @@ -62,7 +62,7 @@ private: void start_async_op() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 10) << dendl; @@ -95,13 +95,13 @@ private: return; } - RWLock::RLocker owner_locker(image_ctx.owner_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; start_async_op(); } bool is_within_overlap_bounds() { I &image_ctx = this->m_image_ctx; - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; auto overlap = std::min(image_ctx.size, image_ctx.migration_info.overlap); return overlap > 0 && @@ -110,7 +110,7 @@ private: void migrate_object() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; auto ctx = create_context_callback< @@ -158,7 +158,7 @@ private: template void MigrateRequest::send_op() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 10) << dendl; @@ -182,7 +182,7 @@ template void MigrateRequest::migrate_objects() { I &image_ctx = this->m_image_ctx; CephContext *cct = 
image_ctx.cct; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); uint64_t overlap_objects = get_num_overlap_objects(); @@ -219,7 +219,7 @@ uint64_t MigrateRequest::get_num_overlap_objects() { CephContext *cct = image_ctx.cct; ldout(cct, 10) << dendl; - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; auto overlap = image_ctx.migration_info.overlap; diff --git a/src/librbd/operation/ObjectMapIterate.cc b/src/librbd/operation/ObjectMapIterate.cc index ce8cced13390..37f9303fc3ba 100644 --- a/src/librbd/operation/ObjectMapIterate.cc +++ b/src/librbd/operation/ObjectMapIterate.cc @@ -91,7 +91,7 @@ private: void send_list_snaps() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); ldout(image_ctx.cct, 5) << m_oid << " C_VerifyObjectCallback::send_list_snaps" << dendl; @@ -107,7 +107,7 @@ private: uint8_t get_object_state() { I &image_ctx = this->m_image_ctx; - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; for (std::vector::const_iterator r = m_snap_set.clones.begin(); r != m_snap_set.clones.end(); ++r) { librados::snap_t from_snap_id; @@ -137,7 +137,7 @@ private: uint64_t next_valid_snap_id(uint64_t snap_id) { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock)); std::map::iterator it = image_ctx.snap_info.lower_bound(snap_id); @@ -150,13 +150,13 @@ private: bool object_map_action(uint8_t new_state) { I &image_ctx = this->m_image_ctx; CephContext *cct = image_ctx.cct; - RWLock::RLocker owner_locker(image_ctx.owner_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; // should have been canceled prior to releasing lock ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); 
- RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; ceph_assert(image_ctx.object_map != nullptr); uint8_t state = (*image_ctx.object_map)[m_object_no]; @@ -204,7 +204,7 @@ bool ObjectMapIterateRequest::should_complete(int r) { << cpp_strerror(r) << dendl; } - RWLock::RLocker owner_lock(m_image_ctx.owner_lock); + std::shared_lock owner_lock{m_image_ctx.owner_lock}; switch (m_state) { case STATE_VERIFY_OBJECTS: if (m_invalidate.test_and_set()) { @@ -235,13 +235,13 @@ bool ObjectMapIterateRequest::should_complete(int r) { template void ObjectMapIterateRequest::send_verify_objects() { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); CephContext *cct = m_image_ctx.cct; uint64_t snap_id; uint64_t num_objects; { - RWLock::RLocker l(m_image_ctx.image_lock); + std::shared_lock l{m_image_ctx.image_lock}; snap_id = m_image_ctx.snap_id; num_objects = Striper::get_num_objects(m_image_ctx.layout, m_image_ctx.get_image_size(snap_id)); @@ -263,7 +263,7 @@ void ObjectMapIterateRequest::send_verify_objects() { template uint64_t ObjectMapIterateRequest::get_image_size() const { - ceph_assert(m_image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); if (m_image_ctx.snap_id == CEPH_NOSNAP) { if (!m_image_ctx.resize_reqs.empty()) { return m_image_ctx.resize_reqs.front()->get_image_size(); @@ -286,8 +286,8 @@ void ObjectMapIterateRequest::send_invalidate_object_map() { true, this->create_callback_context()); - ceph_assert(m_image_ctx.owner_lock.is_locked()); - RWLock::WLocker image_locker(m_image_ctx.image_lock); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); + std::unique_lock image_locker{m_image_ctx.image_lock}; req->send(); } diff --git a/src/librbd/operation/RebuildObjectMapRequest.cc b/src/librbd/operation/RebuildObjectMapRequest.cc index 2bed8f9ec9fa..1ee15015dab1 100644 --- 
a/src/librbd/operation/RebuildObjectMapRequest.cc +++ b/src/librbd/operation/RebuildObjectMapRequest.cc @@ -34,7 +34,7 @@ bool RebuildObjectMapRequest::should_complete(int r) { CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " should_complete: " << " r=" << r << dendl; - RWLock::RLocker owner_lock(m_image_ctx.owner_lock); + std::shared_lock owner_lock{m_image_ctx.owner_lock}; switch (m_state) { case STATE_RESIZE_OBJECT_MAP: ldout(cct, 5) << "RESIZE_OBJECT_MAP" << dendl; @@ -93,17 +93,17 @@ bool RebuildObjectMapRequest::should_complete(int r) { template void RebuildObjectMapRequest::send_resize_object_map() { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); CephContext *cct = m_image_ctx.cct; - m_image_ctx.image_lock.get_read(); + m_image_ctx.image_lock.lock_shared(); ceph_assert(m_image_ctx.object_map != nullptr); uint64_t size = get_image_size(); uint64_t num_objects = Striper::get_num_objects(m_image_ctx.layout, size); if (m_image_ctx.object_map->size() == num_objects) { - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); send_verify_objects(); return; } @@ -117,14 +117,14 @@ void RebuildObjectMapRequest::send_resize_object_map() { m_image_ctx.object_map->aio_resize(size, OBJECT_NONEXISTENT, this->create_callback_context()); - m_image_ctx.image_lock.put_read(); + m_image_ctx.image_lock.unlock_shared(); } template void RebuildObjectMapRequest::send_trim_image() { CephContext *cct = m_image_ctx.cct; - RWLock::RLocker l(m_image_ctx.owner_lock); + std::shared_lock l{m_image_ctx.owner_lock}; // should have been canceled prior to releasing lock ceph_assert(m_image_ctx.exclusive_lock == nullptr || @@ -135,7 +135,7 @@ void RebuildObjectMapRequest::send_trim_image() { uint64_t new_size; uint64_t orig_size; { - RWLock::RLocker l(m_image_ctx.image_lock); + std::shared_lock l{m_image_ctx.image_lock}; ceph_assert(m_image_ctx.object_map != nullptr); new_size = 
get_image_size(); @@ -173,7 +173,7 @@ bool update_object_map(I& image_ctx, uint64_t object_no, uint8_t current_state, template void RebuildObjectMapRequest::send_verify_objects() { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); CephContext *cct = m_image_ctx.cct; m_state = STATE_VERIFY_OBJECTS; @@ -189,7 +189,7 @@ void RebuildObjectMapRequest::send_verify_objects() { template void RebuildObjectMapRequest::send_save_object_map() { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); CephContext *cct = m_image_ctx.cct; ldout(cct, 5) << this << " send_save_object_map" << dendl; @@ -199,14 +199,14 @@ void RebuildObjectMapRequest::send_save_object_map() { ceph_assert(m_image_ctx.exclusive_lock == nullptr || m_image_ctx.exclusive_lock->is_lock_owner()); - RWLock::RLocker image_locker(m_image_ctx.image_lock); + std::shared_lock image_locker{m_image_ctx.image_lock}; ceph_assert(m_image_ctx.object_map != nullptr); m_image_ctx.object_map->aio_save(this->create_callback_context()); } template void RebuildObjectMapRequest::send_update_header() { - ceph_assert(m_image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock)); // should have been canceled prior to releasing lock ceph_assert(m_image_ctx.exclusive_lock == nullptr || @@ -225,13 +225,13 @@ void RebuildObjectMapRequest::send_update_header() { ceph_assert(r == 0); comp->release(); - RWLock::WLocker image_locker(m_image_ctx.image_lock); + std::unique_lock image_locker{m_image_ctx.image_lock}; m_image_ctx.update_flags(m_image_ctx.snap_id, flags, false); } template uint64_t RebuildObjectMapRequest::get_image_size() const { - ceph_assert(m_image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock)); if (m_image_ctx.snap_id == CEPH_NOSNAP) { if (!m_image_ctx.resize_reqs.empty()) { return 
m_image_ctx.resize_reqs.front()->get_image_size(); diff --git a/src/librbd/operation/RenameRequest.cc b/src/librbd/operation/RenameRequest.cc index 823e82dd1412..f62b549c6b9e 100644 --- a/src/librbd/operation/RenameRequest.cc +++ b/src/librbd/operation/RenameRequest.cc @@ -81,7 +81,7 @@ bool RenameRequest::should_complete(int r) { return true; } - RWLock::RLocker owner_lock(image_ctx.owner_lock); + std::shared_lock owner_lock{image_ctx.owner_lock}; switch (m_state) { case STATE_READ_SOURCE_HEADER: send_write_destination_header(); @@ -105,7 +105,7 @@ int RenameRequest::filter_return_code(int r) const { CephContext *cct = image_ctx.cct; if (m_state == STATE_READ_SOURCE_HEADER && r == -ENOENT) { - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; if (image_ctx.name == m_dest_name) { // signal that replay raced with itself return -EEXIST; diff --git a/src/librbd/operation/Request.cc b/src/librbd/operation/Request.cc index 3f50acd5abb6..631d8dd5b201 100644 --- a/src/librbd/operation/Request.cc +++ b/src/librbd/operation/Request.cc @@ -21,8 +21,8 @@ Request::Request(I &image_ctx, Context *on_finish, uint64_t journal_op_tid) template void Request::send() { - I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + [[maybe_unused]] I &image_ctx = this->m_image_ctx; + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); // automatically create the event if we don't need to worry // about affecting concurrent IO ops @@ -74,8 +74,8 @@ template bool Request::append_op_event() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); - RWLock::RLocker image_locker(image_ctx.image_lock); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); + std::shared_lock image_locker{image_ctx.image_lock}; if (image_ctx.journal != nullptr && image_ctx.journal->is_journal_appending()) { append_op_event(util::create_context_callback< @@ -88,7 +88,7 @@ bool 
Request::append_op_event() { template bool Request::commit_op_event(int r) { I &image_ctx = this->m_image_ctx; - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; if (!m_appended_op_event) { return false; @@ -131,8 +131,8 @@ void Request::handle_commit_op_event(int r, int original_ret_val) { template void Request::replay_op_ready(Context *on_safe) { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); - ceph_assert(image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); + ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock)); ceph_assert(m_op_tid != 0); m_appended_op_event = true; @@ -143,8 +143,8 @@ void Request::replay_op_ready(Context *on_safe) { template void Request::append_op_event(Context *on_safe) { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); - ceph_assert(image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); + ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; @@ -170,7 +170,7 @@ void Request::handle_op_event_safe(int r) { ceph_assert(!can_affect_io()); // haven't started the request state machine yet - RWLock::RLocker owner_locker(image_ctx.owner_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; send_op(); } } diff --git a/src/librbd/operation/Request.h b/src/librbd/operation/Request.h index c1ca11ddc6de..e32b49644ceb 100644 --- a/src/librbd/operation/Request.h +++ b/src/librbd/operation/Request.h @@ -38,8 +38,7 @@ protected: ImageCtxT &image_ctx = this->m_image_ctx; ceph_assert(can_affect_io()); - RWLock::RLocker owner_locker(image_ctx.owner_lock); - RWLock::RLocker image_locker(image_ctx.image_lock); + std::scoped_lock locker{image_ctx.owner_lock, image_ctx.image_lock}; if (image_ctx.journal != nullptr) { if 
(image_ctx.journal->is_journal_replaying()) { Context *ctx = util::create_context_callback(request); diff --git a/src/librbd/operation/ResizeRequest.cc b/src/librbd/operation/ResizeRequest.cc index 7450bd4bd98d..464dd2af09ce 100644 --- a/src/librbd/operation/ResizeRequest.cc +++ b/src/librbd/operation/ResizeRequest.cc @@ -43,7 +43,7 @@ ResizeRequest::~ResizeRequest() { I &image_ctx = this->m_image_ctx; ResizeRequest *next_req = NULL; { - RWLock::WLocker image_locker(image_ctx.image_lock); + std::unique_lock image_locker{image_ctx.image_lock}; ceph_assert(m_xlist_item.remove_myself()); if (!image_ctx.resize_reqs.empty()) { next_req = image_ctx.resize_reqs.front(); @@ -51,7 +51,7 @@ ResizeRequest::~ResizeRequest() { } if (next_req != NULL) { - RWLock::RLocker owner_locker(image_ctx.owner_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; next_req->send(); } } @@ -59,10 +59,10 @@ ResizeRequest::~ResizeRequest() { template void ResizeRequest::send() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); { - RWLock::WLocker image_locker(image_ctx.image_lock); + std::unique_lock image_locker{image_ctx.image_lock}; if (!m_xlist_item.is_on_list()) { image_ctx.resize_reqs.push_back(&m_xlist_item); if (image_ctx.resize_reqs.front() != this) { @@ -80,8 +80,8 @@ void ResizeRequest::send() { template void ResizeRequest::send_op() { - I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + [[maybe_unused]] I &image_ctx = this->m_image_ctx; + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); if (this->is_canceled()) { this->async_complete(-ERESTART); @@ -158,7 +158,7 @@ void ResizeRequest::send_trim_image() { CephContext *cct = image_ctx.cct; ldout(cct, 5) << dendl; - RWLock::RLocker owner_locker(image_ctx.owner_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; TrimRequest *req = TrimRequest::create( image_ctx, 
create_context_callback< ResizeRequest, &ResizeRequest::handle_trim_image>(this), @@ -191,7 +191,7 @@ void ResizeRequest::send_flush_cache() { CephContext *cct = image_ctx.cct; ldout(cct, 5) << dendl; - RWLock::RLocker owner_locker(image_ctx.owner_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; auto ctx = create_context_callback< ResizeRequest, &ResizeRequest::handle_flush_cache>(this); auto aio_comp = io::AioCompletion::create_and_start( @@ -225,7 +225,7 @@ void ResizeRequest::send_invalidate_cache() { // need to invalidate since we're deleting objects, and // ObjectCacher doesn't track non-existent objects - RWLock::RLocker owner_locker(image_ctx.owner_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; image_ctx.io_object_dispatcher->invalidate_cache(create_context_callback< ResizeRequest, &ResizeRequest::handle_invalidate_cache>(this)); } @@ -254,7 +254,7 @@ Context *ResizeRequest::send_grow_object_map() { I &image_ctx = this->m_image_ctx; { - RWLock::WLocker image_locker(image_ctx.image_lock); + std::unique_lock image_locker{image_ctx.image_lock}; m_shrink_size_visible = true; } @@ -267,11 +267,11 @@ Context *ResizeRequest::send_grow_object_map() { return nullptr; } - image_ctx.owner_lock.get_read(); - image_ctx.image_lock.get_read(); + image_ctx.owner_lock.lock_shared(); + image_ctx.image_lock.lock_shared(); if (image_ctx.object_map == nullptr) { - image_ctx.image_lock.put_read(); - image_ctx.owner_lock.put_read(); + image_ctx.image_lock.unlock_shared(); + image_ctx.owner_lock.unlock_shared(); // IO is still blocked send_update_header(); @@ -288,8 +288,8 @@ Context *ResizeRequest::send_grow_object_map() { image_ctx.object_map->aio_resize( m_new_size, OBJECT_NONEXISTENT, create_context_callback< ResizeRequest, &ResizeRequest::handle_grow_object_map>(this)); - image_ctx.image_lock.put_read(); - image_ctx.owner_lock.put_read(); + image_ctx.image_lock.unlock_shared(); + image_ctx.owner_lock.unlock_shared(); return nullptr; } @@ -315,11 
+315,11 @@ template Context *ResizeRequest::send_shrink_object_map() { I &image_ctx = this->m_image_ctx; - image_ctx.owner_lock.get_read(); - image_ctx.image_lock.get_read(); + image_ctx.owner_lock.lock_shared(); + image_ctx.image_lock.lock_shared(); if (image_ctx.object_map == nullptr || m_new_size > m_original_size) { - image_ctx.image_lock.put_read(); - image_ctx.owner_lock.put_read(); + image_ctx.image_lock.unlock_shared(); + image_ctx.owner_lock.unlock_shared(); update_size_and_overlap(); return this->create_context_finisher(0); @@ -336,8 +336,8 @@ Context *ResizeRequest::send_shrink_object_map() { image_ctx.object_map->aio_resize( m_new_size, OBJECT_NONEXISTENT, create_context_callback< ResizeRequest, &ResizeRequest::handle_shrink_object_map>(this)); - image_ctx.image_lock.put_read(); - image_ctx.owner_lock.put_read(); + image_ctx.image_lock.unlock_shared(); + image_ctx.owner_lock.unlock_shared(); return nullptr; } @@ -364,7 +364,7 @@ void ResizeRequest::send_post_block_writes() { CephContext *cct = image_ctx.cct; ldout(cct, 5) << dendl; - RWLock::RLocker owner_locker(image_ctx.owner_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; image_ctx.io_work_queue->block_writes(create_context_callback< ResizeRequest, &ResizeRequest::handle_post_block_writes>(this)); } @@ -394,7 +394,7 @@ void ResizeRequest::send_update_header() { << "new_size=" << m_new_size << dendl;; // should have been canceled prior to releasing lock - RWLock::RLocker owner_locker(image_ctx.owner_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); @@ -436,7 +436,7 @@ Context *ResizeRequest::handle_update_header(int *result) { template void ResizeRequest::compute_parent_overlap() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock)); if (image_ctx.parent == NULL) { m_new_parent_overlap = 0; @@ 
-449,7 +449,7 @@ template void ResizeRequest::update_size_and_overlap() { I &image_ctx = this->m_image_ctx; { - RWLock::WLocker image_locker(image_ctx.image_lock); + std::unique_lock image_locker{image_ctx.image_lock}; image_ctx.size = m_new_size; if (image_ctx.parent != NULL && m_new_size < m_original_size) { diff --git a/src/librbd/operation/SnapshotCreateRequest.cc b/src/librbd/operation/SnapshotCreateRequest.cc index e6d0e9b9b0ed..763625b4ac91 100644 --- a/src/librbd/operation/SnapshotCreateRequest.cc +++ b/src/librbd/operation/SnapshotCreateRequest.cc @@ -63,7 +63,7 @@ Context *SnapshotCreateRequest::handle_suspend_requests(int *result) { template void SnapshotCreateRequest::send_suspend_aio() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -158,8 +158,8 @@ void SnapshotCreateRequest::send_create_snap() { CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; - RWLock::RLocker owner_locker(image_ctx.owner_lock); - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; + std::shared_lock image_locker{image_ctx.image_lock}; // should have been canceled prior to releasing lock ceph_assert(image_ctx.exclusive_lock == nullptr || @@ -207,9 +207,9 @@ template Context *SnapshotCreateRequest::send_create_object_map() { I &image_ctx = this->m_image_ctx; - image_ctx.image_lock.get_read(); + image_ctx.image_lock.lock_shared(); if (image_ctx.object_map == nullptr || m_skip_object_map) { - image_ctx.image_lock.put_read(); + image_ctx.image_lock.unlock_shared(); update_snap_context(); image_ctx.io_work_queue->unblock_writes(); @@ -223,7 +223,7 @@ Context *SnapshotCreateRequest::send_create_object_map() { m_snap_id, create_context_callback< SnapshotCreateRequest, 
&SnapshotCreateRequest::handle_create_object_map>(this)); - image_ctx.image_lock.put_read(); + image_ctx.image_lock.unlock_shared(); return nullptr; } @@ -276,8 +276,8 @@ template void SnapshotCreateRequest::update_snap_context() { I &image_ctx = this->m_image_ctx; - RWLock::RLocker owner_locker(image_ctx.owner_lock); - RWLock::WLocker image_locker(image_ctx.image_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; + std::unique_lock image_locker{image_ctx.image_lock}; if (image_ctx.old_format) { return; } diff --git a/src/librbd/operation/SnapshotLimitRequest.cc b/src/librbd/operation/SnapshotLimitRequest.cc index 5e4dce9e32b3..17aed5f6a501 100644 --- a/src/librbd/operation/SnapshotLimitRequest.cc +++ b/src/librbd/operation/SnapshotLimitRequest.cc @@ -40,13 +40,13 @@ bool SnapshotLimitRequest::should_complete(int r) { template void SnapshotLimitRequest::send_limit_snaps() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; { - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; librados::ObjectWriteOperation op; cls_client::snapshot_set_limit(&op, m_snap_limit); diff --git a/src/librbd/operation/SnapshotProtectRequest.cc b/src/librbd/operation/SnapshotProtectRequest.cc index 92197f09dd6b..f3b9e7e0b76f 100644 --- a/src/librbd/operation/SnapshotProtectRequest.cc +++ b/src/librbd/operation/SnapshotProtectRequest.cc @@ -61,7 +61,7 @@ bool SnapshotProtectRequest::should_complete(int r) { template void SnapshotProtectRequest::send_protect_snap() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -76,7 +76,7 @@ void 
SnapshotProtectRequest::send_protect_snap() { template int SnapshotProtectRequest::verify_and_send_protect_snap() { I &image_ctx = this->m_image_ctx; - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; CephContext *cct = image_ctx.cct; if ((image_ctx.features & RBD_FEATURE_LAYERING) == 0) { diff --git a/src/librbd/operation/SnapshotRemoveRequest.cc b/src/librbd/operation/SnapshotRemoveRequest.cc index a7088687b4d4..e120aea4ae80 100644 --- a/src/librbd/operation/SnapshotRemoveRequest.cc +++ b/src/librbd/operation/SnapshotRemoveRequest.cc @@ -4,6 +4,7 @@ #include "librbd/operation/SnapshotRemoveRequest.h" #include "common/dout.h" #include "common/errno.h" +#include "include/ceph_assert.h" #include "cls/rbd/cls_rbd_client.h" #include "librbd/ExclusiveLock.h" #include "librbd/ImageCtx.h" @@ -36,9 +37,9 @@ void SnapshotRemoveRequest::send_op() { I &image_ctx = this->m_image_ctx; CephContext *cct = image_ctx.cct; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); { - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; if (image_ctx.snap_info.find(m_snap_id) == image_ctx.snap_info.end()) { lderr(cct) << "snapshot doesn't exist" << dendl; this->async_complete(-ENOENT); @@ -158,7 +159,7 @@ void SnapshotRemoveRequest::detach_child() { bool detach_child = false; { - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; cls::rbd::ParentImageSpec our_pspec; int r = image_ctx.get_parent_spec(m_snap_id, &our_pspec); @@ -223,8 +224,8 @@ void SnapshotRemoveRequest::remove_object_map() { CephContext *cct = image_ctx.cct; { - RWLock::RLocker owner_lock(image_ctx.owner_lock); - RWLock::WLocker image_locker(image_ctx.image_lock); + std::shared_lock owner_lock{image_ctx.owner_lock}; + std::unique_lock image_locker{image_ctx.image_lock}; if (image_ctx.object_map != 
nullptr) { ldout(cct, 5) << dendl; @@ -330,7 +331,7 @@ void SnapshotRemoveRequest::remove_snap_context() { CephContext *cct = image_ctx.cct; ldout(cct, 5) << dendl; - RWLock::WLocker image_locker(image_ctx.image_lock); + std::unique_lock image_locker{image_ctx.image_lock}; image_ctx.rm_snap(m_snap_namespace, m_snap_name, m_snap_id); } @@ -338,7 +339,7 @@ template int SnapshotRemoveRequest::scan_for_parents( cls::rbd::ParentImageSpec &pspec) { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock)); if (pspec.pool_id != -1) { map::iterator it; diff --git a/src/librbd/operation/SnapshotRenameRequest.cc b/src/librbd/operation/SnapshotRenameRequest.cc index 973b306b0811..a957074b6794 100644 --- a/src/librbd/operation/SnapshotRenameRequest.cc +++ b/src/librbd/operation/SnapshotRenameRequest.cc @@ -41,7 +41,7 @@ SnapshotRenameRequest::SnapshotRenameRequest(I &image_ctx, template journal::Event SnapshotRenameRequest::create_event(uint64_t op_tid) const { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.image_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock)); std::string src_snap_name; auto snap_info_it = image_ctx.snap_info.find(m_snap_id); @@ -77,8 +77,8 @@ bool SnapshotRenameRequest::should_complete(int r) { template void SnapshotRenameRequest::send_rename_snap() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); - RWLock::RLocker image_locker(image_ctx.image_lock); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); + std::shared_lock image_locker{image_ctx.image_lock}; CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; diff --git a/src/librbd/operation/SnapshotRollbackRequest.cc b/src/librbd/operation/SnapshotRollbackRequest.cc index fa4725231bc8..e718259b5fbc 100644 --- a/src/librbd/operation/SnapshotRollbackRequest.cc +++ b/src/librbd/operation/SnapshotRollbackRequest.cc @@ 
-47,7 +47,7 @@ public: << m_object_num << dendl; { - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; if (m_object_num < m_head_num_objects && m_snap_object_map != nullptr && !image_ctx.object_map->object_may_exist(m_object_num) && @@ -139,8 +139,8 @@ void SnapshotRollbackRequest::send_resize_image() { uint64_t current_size; { - RWLock::RLocker owner_locker(image_ctx.owner_lock); - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; + std::shared_lock image_locker{image_ctx.image_lock}; current_size = image_ctx.get_image_size(CEPH_NOSNAP); } @@ -154,7 +154,7 @@ void SnapshotRollbackRequest::send_resize_image() { CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; - RWLock::RLocker owner_locker(image_ctx.owner_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; Context *ctx = create_context_callback< SnapshotRollbackRequest, &SnapshotRollbackRequest::handle_resize_image>(this); @@ -187,8 +187,8 @@ void SnapshotRollbackRequest::send_get_snap_object_map() { bool object_map_enabled; CephContext *cct = image_ctx.cct; { - RWLock::RLocker owner_locker(image_ctx.owner_lock); - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; + std::shared_lock image_locker{image_ctx.image_lock}; object_map_enabled = (image_ctx.object_map != nullptr); int r = image_ctx.get_flags(m_snap_id, &flags); if (r < 0) { @@ -238,8 +238,8 @@ void SnapshotRollbackRequest::send_rollback_object_map() { I &image_ctx = this->m_image_ctx; { - RWLock::RLocker owner_locker(image_ctx.owner_lock); - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; + std::shared_lock image_locker{image_ctx.image_lock}; if (image_ctx.object_map != nullptr) { CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -280,10 +280,10 @@ 
void SnapshotRollbackRequest::send_rollback_objects() { CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; - RWLock::RLocker owner_locker(image_ctx.owner_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; uint64_t num_objects; { - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; num_objects = Striper::get_num_objects(image_ctx.layout, image_ctx.get_current_size()); } @@ -325,8 +325,8 @@ Context *SnapshotRollbackRequest::send_refresh_object_map() { bool object_map_enabled; { - RWLock::RLocker owner_locker(image_ctx.owner_lock); - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; + std::shared_lock image_locker{image_ctx.image_lock}; object_map_enabled = (image_ctx.object_map != nullptr); } if (!object_map_enabled) { @@ -373,7 +373,7 @@ Context *SnapshotRollbackRequest::send_invalidate_cache() { CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; - RWLock::RLocker owner_lock(image_ctx.owner_lock); + std::shared_lock owner_lock{image_ctx.owner_lock}; Context *ctx = create_context_callback< SnapshotRollbackRequest, &SnapshotRollbackRequest::handle_invalidate_cache>(this); @@ -398,8 +398,8 @@ template void SnapshotRollbackRequest::apply() { I &image_ctx = this->m_image_ctx; - RWLock::RLocker owner_locker(image_ctx.owner_lock); - RWLock::WLocker image_locker(image_ctx.image_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; + std::unique_lock image_locker{image_ctx.image_lock}; if (image_ctx.object_map != nullptr) { std::swap(m_object_map, image_ctx.object_map); } diff --git a/src/librbd/operation/SnapshotUnprotectRequest.cc b/src/librbd/operation/SnapshotUnprotectRequest.cc index 12e6383455f8..76caf68f335e 100644 --- a/src/librbd/operation/SnapshotUnprotectRequest.cc +++ b/src/librbd/operation/SnapshotUnprotectRequest.cc @@ -64,7 +64,7 @@ public: int send() override { I 
&image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 10) << this << " scanning pool '" << m_pool.second << "'" @@ -183,7 +183,7 @@ bool SnapshotUnprotectRequest::should_complete(int r) { return should_complete_error(); } - RWLock::RLocker owner_lock(image_ctx.owner_lock); + std::shared_lock owner_lock{image_ctx.owner_lock}; bool finished = false; switch (m_state) { case STATE_UNPROTECT_SNAP_START: @@ -205,7 +205,7 @@ bool SnapshotUnprotectRequest::should_complete(int r) { template bool SnapshotUnprotectRequest::should_complete_error() { I &image_ctx = this->m_image_ctx; - RWLock::RLocker owner_locker(image_ctx.owner_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; CephContext *cct = image_ctx.cct; lderr(cct) << this << " " << __func__ << ": " << "ret_val=" << m_ret_val << dendl; @@ -222,7 +222,7 @@ bool SnapshotUnprotectRequest::should_complete_error() { template void SnapshotUnprotectRequest::send_unprotect_snap_start() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -237,7 +237,7 @@ void SnapshotUnprotectRequest::send_unprotect_snap_start() { template void SnapshotUnprotectRequest::send_scan_pool_children() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -270,7 +270,7 @@ void SnapshotUnprotectRequest::send_scan_pool_children() { template void SnapshotUnprotectRequest::send_unprotect_snap_finish() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = 
image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -290,7 +290,7 @@ void SnapshotUnprotectRequest::send_unprotect_snap_finish() { template void SnapshotUnprotectRequest::send_unprotect_snap_rollback() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 5) << this << " " << __func__ << dendl; @@ -310,7 +310,7 @@ void SnapshotUnprotectRequest::send_unprotect_snap_rollback() { template int SnapshotUnprotectRequest::verify_and_send_unprotect_snap_start() { I &image_ctx = this->m_image_ctx; - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; CephContext *cct = image_ctx.cct; if ((image_ctx.features & RBD_FEATURE_LAYERING) == 0) { diff --git a/src/librbd/operation/SparsifyRequest.cc b/src/librbd/operation/SparsifyRequest.cc index 537a0ca41eb8..0c5a4efab4b6 100644 --- a/src/librbd/operation/SparsifyRequest.cc +++ b/src/librbd/operation/SparsifyRequest.cc @@ -120,7 +120,7 @@ public: int send() override { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); ldout(m_cct, 20) << dendl; @@ -131,7 +131,7 @@ public: } { - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; if (image_ctx.object_map != nullptr && !image_ctx.object_map->object_may_exist(m_object_no)) { // can skip because the object does not exist @@ -204,14 +204,14 @@ public: ldout(m_cct, 20) << dendl; - image_ctx.owner_lock.get_read(); - image_ctx.image_lock.get_read(); + image_ctx.owner_lock.lock_shared(); + image_ctx.image_lock.lock_shared(); if (image_ctx.object_map == nullptr) { // possible that exclusive lock was lost in background lderr(m_cct) << "object map is not initialized" << dendl; - image_ctx.image_lock.put_read(); - image_ctx.owner_lock.put_read(); + 
image_ctx.image_lock.unlock_shared(); + image_ctx.owner_lock.unlock_shared(); finish_op(-EINVAL); return; } @@ -220,8 +220,8 @@ public: m_finish_op_ctx = image_ctx.exclusive_lock->start_op(&r); if (m_finish_op_ctx == nullptr) { lderr(m_cct) << "lost exclusive lock" << dendl; - image_ctx.image_lock.put_read(); - image_ctx.owner_lock.put_read(); + image_ctx.image_lock.unlock_shared(); + image_ctx.owner_lock.unlock_shared(); finish_op(r); return; } @@ -235,8 +235,8 @@ public: OBJECT_EXISTS, {}, false, ctx); // NOTE: state machine might complete before we reach here - image_ctx.image_lock.put_read(); - image_ctx.owner_lock.put_read(); + image_ctx.image_lock.unlock_shared(); + image_ctx.owner_lock.unlock_shared(); if (!sent) { finish_op(0); } @@ -296,8 +296,8 @@ public: &C_SparsifyObject::handle_post_update_object_map>(this); bool sent; { - RWLock::RLocker owner_locker(image_ctx.owner_lock); - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock owner_locker{image_ctx.owner_lock}; + std::shared_lock image_locker{image_ctx.image_lock}; assert(image_ctx.exclusive_lock->is_lock_owner()); assert(image_ctx.object_map != nullptr); @@ -459,16 +459,16 @@ void SparsifyRequest::send_op() { template void SparsifyRequest::sparsify_objects() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; ldout(cct, 5) << dendl; - assert(image_ctx.owner_lock.is_locked()); + assert(ceph_mutex_is_locked(image_ctx.owner_lock)); uint64_t objects = 0; { - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; objects = image_ctx.get_object_count(CEPH_NOSNAP); } diff --git a/src/librbd/operation/TrimRequest.cc b/src/librbd/operation/TrimRequest.cc index ab7f31192092..f1a34a5592bf 100644 --- a/src/librbd/operation/TrimRequest.cc +++ b/src/librbd/operation/TrimRequest.cc @@ -39,7 +39,7 @@ public: int send() 
override { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); @@ -69,12 +69,12 @@ public: int send() override { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); ceph_assert(image_ctx.exclusive_lock == nullptr || image_ctx.exclusive_lock->is_lock_owner()); { - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; if (image_ctx.object_map != nullptr && !image_ctx.object_map->object_may_exist(m_object_no)) { return 1; @@ -133,7 +133,7 @@ bool TrimRequest::should_complete(int r) return true; } - RWLock::RLocker owner_lock(image_ctx.owner_lock); + std::shared_lock owner_lock{image_ctx.owner_lock}; switch (m_state) { case STATE_PRE_TRIM: ldout(cct, 5) << " PRE_TRIM" << dendl; @@ -180,7 +180,7 @@ void TrimRequest::send() { template void TrimRequest::send_pre_trim() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); if (m_delete_start >= m_num_objects) { send_clean_boundary(); @@ -188,7 +188,7 @@ void TrimRequest::send_pre_trim() { } { - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; if (image_ctx.object_map != nullptr) { ldout(image_ctx.cct, 5) << this << " send_pre_trim: " << " delete_start_min=" << m_delete_start_min @@ -211,13 +211,13 @@ void TrimRequest::send_pre_trim() { template void TrimRequest::send_copyup_objects() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); ::SnapContext snapc; bool has_snapshots; uint64_t parent_overlap; { - RWLock::RLocker image_locker(image_ctx.image_lock); + 
std::shared_lock image_locker{image_ctx.image_lock}; snapc = image_ctx.snapc; has_snapshots = !image_ctx.snaps.empty(); @@ -258,7 +258,7 @@ void TrimRequest::send_copyup_objects() { template void TrimRequest::send_remove_objects() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); ldout(image_ctx.cct, 5) << this << " send_remove_objects: " << " delete_start=" << m_delete_start @@ -279,10 +279,10 @@ void TrimRequest::send_remove_objects() { template void TrimRequest::send_post_trim() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); { - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; if (image_ctx.object_map != nullptr) { ldout(image_ctx.cct, 5) << this << " send_post_trim:" << " delete_start_min=" << m_delete_start_min @@ -305,7 +305,7 @@ void TrimRequest::send_post_trim() { template void TrimRequest::send_clean_boundary() { I &image_ctx = this->m_image_ctx; - ceph_assert(image_ctx.owner_lock.is_locked()); + ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock)); CephContext *cct = image_ctx.cct; if (m_delete_off <= m_new_size) { send_finish(0); @@ -323,7 +323,7 @@ void TrimRequest::send_clean_boundary() { ::SnapContext snapc; { - RWLock::RLocker image_locker(image_ctx.image_lock); + std::shared_lock image_locker{image_ctx.image_lock}; snapc = image_ctx.snapc; } diff --git a/src/librbd/watcher/Notifier.cc b/src/librbd/watcher/Notifier.cc index dfb95aec0b85..2715f2592626 100644 --- a/src/librbd/watcher/Notifier.cc +++ b/src/librbd/watcher/Notifier.cc @@ -38,18 +38,18 @@ void Notifier::C_AioNotify::finish(int r) { Notifier::Notifier(ContextWQ *work_queue, IoCtx &ioctx, const std::string &oid) : m_work_queue(work_queue), m_ioctx(ioctx), m_oid(oid), - m_aio_notify_lock(util::unique_lock_name( - 
"librbd::object_watcher::Notifier::m_aio_notify_lock", this)) { + m_aio_notify_lock(ceph::make_mutex(util::unique_lock_name( + "librbd::object_watcher::Notifier::m_aio_notify_lock", this))) { m_cct = reinterpret_cast(m_ioctx.cct()); } Notifier::~Notifier() { - Mutex::Locker aio_notify_locker(m_aio_notify_lock); + std::lock_guard aio_notify_locker{m_aio_notify_lock}; ceph_assert(m_pending_aio_notifies == 0); } void Notifier::flush(Context *on_finish) { - Mutex::Locker aio_notify_locker(m_aio_notify_lock); + std::lock_guard aio_notify_locker{m_aio_notify_lock}; if (m_pending_aio_notifies == 0) { m_work_queue->queue(on_finish, 0); return; @@ -61,7 +61,7 @@ void Notifier::flush(Context *on_finish) { void Notifier::notify(bufferlist &bl, NotifyResponse *response, Context *on_finish) { { - Mutex::Locker aio_notify_locker(m_aio_notify_lock); + std::lock_guard aio_notify_locker{m_aio_notify_lock}; ++m_pending_aio_notifies; ldout(m_cct, 20) << "pending=" << m_pending_aio_notifies << dendl; @@ -77,7 +77,7 @@ void Notifier::notify(bufferlist &bl, NotifyResponse *response, void Notifier::handle_notify(int r, Context *on_finish) { ldout(m_cct, 20) << "r=" << r << dendl; - Mutex::Locker aio_notify_locker(m_aio_notify_lock); + std::lock_guard aio_notify_locker{m_aio_notify_lock}; ceph_assert(m_pending_aio_notifies > 0); --m_pending_aio_notifies; diff --git a/src/librbd/watcher/Notifier.h b/src/librbd/watcher/Notifier.h index 8b0ad37b4d3d..5bfb10399032 100644 --- a/src/librbd/watcher/Notifier.h +++ b/src/librbd/watcher/Notifier.h @@ -8,7 +8,7 @@ #include "include/buffer_fwd.h" #include "include/Context.h" #include "include/rados/librados.hpp" -#include "common/Mutex.h" +#include "common/ceph_mutex.h" #include "common/WorkQueue.h" #include @@ -49,7 +49,7 @@ private: CephContext *m_cct; std::string m_oid; - Mutex m_aio_notify_lock; + ceph::mutex m_aio_notify_lock; size_t m_pending_aio_notifies = 0; Contexts m_aio_notify_flush_ctxs; diff --git a/src/librbd/watcher/RewatchRequest.cc 
b/src/librbd/watcher/RewatchRequest.cc index 40c3dfe7dd28..cf44cf385cb3 100644 --- a/src/librbd/watcher/RewatchRequest.cc +++ b/src/librbd/watcher/RewatchRequest.cc @@ -2,7 +2,7 @@ // vim: ts=8 sw=2 smarttab #include "librbd/watcher/RewatchRequest.h" -#include "common/RWLock.h" +#include "common/ceph_mutex.h" #include "common/errno.h" #include "librbd/Utils.h" @@ -21,7 +21,7 @@ namespace watcher { using std::string; RewatchRequest::RewatchRequest(librados::IoCtx& ioctx, const string& oid, - RWLock &watch_lock, + ceph::shared_mutex &watch_lock, librados::WatchCtx2 *watch_ctx, uint64_t *watch_handle, Context *on_finish) : m_ioctx(ioctx), m_oid(oid), m_watch_lock(watch_lock), @@ -34,7 +34,7 @@ void RewatchRequest::send() { } void RewatchRequest::unwatch() { - ceph_assert(m_watch_lock.is_wlocked()); + ceph_assert(ceph_mutex_is_wlocked(m_watch_lock)); if (*m_watch_handle == 0) { rewatch(); return; @@ -88,7 +88,7 @@ void RewatchRequest::handle_rewatch(int r) { } { - RWLock::WLocker watch_locker(m_watch_lock); + std::unique_lock watch_locker{m_watch_lock}; *m_watch_handle = m_rewatch_handle; } diff --git a/src/librbd/watcher/RewatchRequest.h b/src/librbd/watcher/RewatchRequest.h index d4fc250abec7..ce5e31539e42 100644 --- a/src/librbd/watcher/RewatchRequest.h +++ b/src/librbd/watcher/RewatchRequest.h @@ -4,11 +4,11 @@ #ifndef CEPH_LIBRBD_WATCHER_REWATCH_REQUEST_H #define CEPH_LIBRBD_WATCHER_REWATCH_REQUEST_H +#include "common/ceph_mutex.h" #include "include/int_types.h" #include "include/rados/librados.hpp" struct Context; -struct RWLock; namespace librbd { @@ -18,7 +18,7 @@ class RewatchRequest { public: static RewatchRequest *create(librados::IoCtx& ioctx, const std::string& oid, - RWLock &watch_lock, + ceph::shared_mutex &watch_lock, librados::WatchCtx2 *watch_ctx, uint64_t *watch_handle, Context *on_finish) { return new RewatchRequest(ioctx, oid, watch_lock, watch_ctx, watch_handle, @@ -26,7 +26,7 @@ public: } RewatchRequest(librados::IoCtx& ioctx, const std::string& 
oid, - RWLock &watch_lock, librados::WatchCtx2 *watch_ctx, + ceph::shared_mutex &watch_lock, librados::WatchCtx2 *watch_ctx, uint64_t *watch_handle, Context *on_finish); void send(); @@ -53,7 +53,7 @@ private: librados::IoCtx& m_ioctx; std::string m_oid; - RWLock &m_watch_lock; + ceph::shared_mutex &m_watch_lock; librados::WatchCtx2 *m_watch_ctx; uint64_t *m_watch_handle; Context *m_on_finish;