git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
librbd: s/Mutex/ceph::mutex/
author: Kefu Chai <kchai@redhat.com>
Sun, 7 Jul 2019 04:38:40 +0000 (12:38 +0800)
committer: Kefu Chai <kchai@redhat.com>
Sat, 3 Aug 2019 03:27:19 +0000 (11:27 +0800)
Signed-off-by: Kefu Chai <kchai@redhat.com>
136 files changed:
src/librbd/AsyncObjectThrottle.cc
src/librbd/AsyncObjectThrottle.h
src/librbd/AsyncRequest.cc
src/librbd/BlockGuard.h
src/librbd/DeepCopyRequest.cc
src/librbd/DeepCopyRequest.h
src/librbd/ExclusiveLock.cc
src/librbd/ExclusiveLock.h
src/librbd/ImageCtx.cc
src/librbd/ImageCtx.h
src/librbd/ImageState.cc
src/librbd/ImageState.h
src/librbd/ImageWatcher.cc
src/librbd/ImageWatcher.h
src/librbd/Journal.cc
src/librbd/Journal.h
src/librbd/ManagedLock.cc
src/librbd/ManagedLock.h
src/librbd/ObjectMap.cc
src/librbd/ObjectMap.h
src/librbd/Operations.cc
src/librbd/TaskFinisher.h
src/librbd/Watcher.cc
src/librbd/Watcher.h
src/librbd/api/DiffIterate.cc
src/librbd/api/Group.cc
src/librbd/api/Image.cc
src/librbd/api/Migration.cc
src/librbd/api/Mirror.cc
src/librbd/api/Pool.cc
src/librbd/api/Snapshot.cc
src/librbd/api/Trash.cc
src/librbd/cache/ObjectCacherObjectDispatch.cc
src/librbd/cache/ObjectCacherObjectDispatch.h
src/librbd/cache/ObjectCacherWriteback.cc
src/librbd/cache/ObjectCacherWriteback.h
src/librbd/cache/ParentCacheObjectDispatch.h
src/librbd/cache/WriteAroundObjectDispatch.cc
src/librbd/cache/WriteAroundObjectDispatch.h
src/librbd/deep_copy/ImageCopyRequest.cc
src/librbd/deep_copy/ImageCopyRequest.h
src/librbd/deep_copy/ObjectCopyRequest.cc
src/librbd/deep_copy/ObjectCopyRequest.h
src/librbd/deep_copy/SetHeadRequest.cc
src/librbd/deep_copy/SnapshotCopyRequest.cc
src/librbd/deep_copy/SnapshotCopyRequest.h
src/librbd/deep_copy/SnapshotCreateRequest.cc
src/librbd/exclusive_lock/AutomaticPolicy.cc
src/librbd/exclusive_lock/PostAcquireRequest.cc
src/librbd/exclusive_lock/PreReleaseRequest.cc
src/librbd/exclusive_lock/StandardPolicy.cc
src/librbd/image/AttachChildRequest.cc
src/librbd/image/CloneRequest.cc
src/librbd/image/CloseRequest.cc
src/librbd/image/DetachChildRequest.cc
src/librbd/image/ListWatchersRequest.cc
src/librbd/image/OpenRequest.cc
src/librbd/image/PreRemoveRequest.cc
src/librbd/image/RefreshParentRequest.cc
src/librbd/image/RefreshRequest.cc
src/librbd/image/RemoveRequest.cc
src/librbd/image/SetFlagsRequest.cc
src/librbd/image/SetFlagsRequest.h
src/librbd/image/SetSnapRequest.cc
src/librbd/image_watcher/NotifyLockOwner.cc
src/librbd/internal.cc
src/librbd/io/AsyncOperation.cc
src/librbd/io/CopyupRequest.cc
src/librbd/io/CopyupRequest.h
src/librbd/io/ImageRequest.cc
src/librbd/io/ImageRequestWQ.cc
src/librbd/io/ImageRequestWQ.h
src/librbd/io/ObjectDispatcher.cc
src/librbd/io/ObjectDispatcher.h
src/librbd/io/ObjectRequest.cc
src/librbd/io/SimpleSchedulerObjectDispatch.cc
src/librbd/io/SimpleSchedulerObjectDispatch.h
src/librbd/journal/CreateRequest.h
src/librbd/journal/DemoteRequest.cc
src/librbd/journal/DemoteRequest.h
src/librbd/journal/OpenRequest.cc
src/librbd/journal/OpenRequest.h
src/librbd/journal/PromoteRequest.cc
src/librbd/journal/PromoteRequest.h
src/librbd/journal/RemoveRequest.h
src/librbd/journal/Replay.cc
src/librbd/journal/Replay.h
src/librbd/journal/ResetRequest.h
src/librbd/journal/Utils.cc
src/librbd/journal/Utils.h
src/librbd/librbd.cc
src/librbd/mirror/DemoteRequest.cc
src/librbd/mirror/DisableRequest.cc
src/librbd/mirror/DisableRequest.h
src/librbd/object_map/CreateRequest.cc
src/librbd/object_map/CreateRequest.h
src/librbd/object_map/InvalidateRequest.cc
src/librbd/object_map/RefreshRequest.cc
src/librbd/object_map/RefreshRequest.h
src/librbd/object_map/RemoveRequest.cc
src/librbd/object_map/RemoveRequest.h
src/librbd/object_map/Request.cc
src/librbd/object_map/ResizeRequest.cc
src/librbd/object_map/ResizeRequest.h
src/librbd/object_map/SnapshotCreateRequest.cc
src/librbd/object_map/SnapshotCreateRequest.h
src/librbd/object_map/SnapshotRemoveRequest.cc
src/librbd/object_map/SnapshotRemoveRequest.h
src/librbd/object_map/SnapshotRollbackRequest.cc
src/librbd/object_map/UpdateRequest.cc
src/librbd/object_map/UpdateRequest.h
src/librbd/operation/DisableFeaturesRequest.cc
src/librbd/operation/EnableFeaturesRequest.cc
src/librbd/operation/FlattenRequest.cc
src/librbd/operation/MetadataRemoveRequest.cc
src/librbd/operation/MetadataSetRequest.cc
src/librbd/operation/MigrateRequest.cc
src/librbd/operation/ObjectMapIterate.cc
src/librbd/operation/RebuildObjectMapRequest.cc
src/librbd/operation/RenameRequest.cc
src/librbd/operation/Request.cc
src/librbd/operation/Request.h
src/librbd/operation/ResizeRequest.cc
src/librbd/operation/SnapshotCreateRequest.cc
src/librbd/operation/SnapshotLimitRequest.cc
src/librbd/operation/SnapshotProtectRequest.cc
src/librbd/operation/SnapshotRemoveRequest.cc
src/librbd/operation/SnapshotRenameRequest.cc
src/librbd/operation/SnapshotRollbackRequest.cc
src/librbd/operation/SnapshotUnprotectRequest.cc
src/librbd/operation/SparsifyRequest.cc
src/librbd/operation/TrimRequest.cc
src/librbd/watcher/Notifier.cc
src/librbd/watcher/Notifier.h
src/librbd/watcher/RewatchRequest.cc
src/librbd/watcher/RewatchRequest.h

index b6dbcb261399bb4df84684c5123caa2d63eb1399..c62c845394047f79eb44c4dcdeabe14ca13e8066 100644 (file)
@@ -15,7 +15,8 @@ AsyncObjectThrottle<T>::AsyncObjectThrottle(
     const AsyncRequest<T>* async_request, T &image_ctx,
     const ContextFactory& context_factory, Context *ctx,
     ProgressContext *prog_ctx, uint64_t object_no, uint64_t end_object_no)
-  : m_lock(util::unique_lock_name("librbd::AsyncThrottle::m_lock", this)),
+  : m_lock(ceph::make_mutex(
+      util::unique_lock_name("librbd::AsyncThrottle::m_lock", this))),
     m_async_request(async_request), m_image_ctx(image_ctx),
     m_context_factory(context_factory), m_ctx(ctx), m_prog_ctx(prog_ctx),
     m_object_no(object_no), m_end_object_no(end_object_no), m_current_ops(0),
@@ -25,10 +26,10 @@ AsyncObjectThrottle<T>::AsyncObjectThrottle(
 
 template <typename T>
 void AsyncObjectThrottle<T>::start_ops(uint64_t max_concurrent) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   bool complete;
   {
-    Mutex::Locker l(m_lock);
+    std::lock_guard l{m_lock};
     for (uint64_t i = 0; i < max_concurrent; ++i) {
       start_next_op();
       if (m_ret < 0 && m_current_ops == 0) {
@@ -48,8 +49,8 @@ template <typename T>
 void AsyncObjectThrottle<T>::finish_op(int r) {
   bool complete;
   {
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-    Mutex::Locker locker(m_lock);
+    std::shared_lock owner_locker{m_image_ctx.owner_lock};
+    std::lock_guard locker{m_lock};
     --m_current_ops;
     if (r < 0 && r != -ENOENT && m_ret == 0) {
       m_ret = r;
index e1b089626c3495e6e34466cfef1189061405a1f5..64397f9e4b42f8e44417f8a239a53c0c791bbd2a 100644 (file)
@@ -58,7 +58,7 @@ public:
   void finish_op(int r) override;
 
 private:
-  Mutex m_lock;
+  ceph::mutex m_lock;
   const AsyncRequest<ImageCtxT> *m_async_request;
   ImageCtxT &m_image_ctx;
   ContextFactory m_context_factory;
index 8a76a226474df96b37d2038699b33d4641b37c38..67ea116a1515532c3040bc9da2f12ecf5c2cb138 100644 (file)
@@ -43,7 +43,7 @@ Context *AsyncRequest<T>::create_async_callback_context() {
 
 template <typename T>
 void AsyncRequest<T>::start_request() {
-  Mutex::Locker async_ops_locker(m_image_ctx.async_ops_lock);
+  std::lock_guard async_ops_locker{m_image_ctx.async_ops_lock};
   m_image_ctx.async_requests.push_back(&m_xlist_item);
 }
 
@@ -51,7 +51,7 @@ template <typename T>
 void AsyncRequest<T>::finish_request() {
   decltype(m_image_ctx.async_requests_waiters) waiters;
   {
-    Mutex::Locker async_ops_locker(m_image_ctx.async_ops_lock);
+    std::lock_guard async_ops_locker{m_image_ctx.async_ops_lock};
     ceph_assert(m_xlist_item.remove_myself());
 
     if (m_image_ctx.async_requests.empty()) {
index 062f1901d61b7c1f08803b3f9a2e34841711a317..1b59ab7887555d0727c1c3c2f19ca2002c1733eb 100644 (file)
@@ -6,7 +6,7 @@
 
 #include "include/int_types.h"
 #include "common/dout.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include <boost/intrusive/list.hpp>
 #include <boost/intrusive/set.hpp>
 #include <deque>
@@ -48,7 +48,7 @@ public:
   typedef std::list<BlockOperation> BlockOperations;
 
   BlockGuard(CephContext *cct)
-    : m_cct(cct), m_lock("librbd::BlockGuard::m_lock") {
+    : m_cct(cct) {
   }
 
   BlockGuard(const BlockGuard&) = delete;
@@ -63,7 +63,7 @@ public:
    */
   int detain(const BlockExtent &block_extent, BlockOperation *block_operation,
              BlockGuardCell **cell) {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ldout(m_cct, 20) << "block_start=" << block_extent.block_start << ", "
                      << "block_end=" << block_extent.block_end << ", "
                      << "free_slots=" << m_free_detained_block_extents.size()
@@ -104,7 +104,7 @@ public:
    * Release any detained IO operations from the provided cell.
    */
   void release(BlockGuardCell *cell, BlockOperations *block_operations) {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
 
     ceph_assert(cell != nullptr);
     auto &detained_block_extent = reinterpret_cast<DetainedBlockExtent &>(
@@ -158,7 +158,7 @@ private:
 
   CephContext *m_cct;
 
-  Mutex m_lock;
+  ceph::mutex m_lock = ceph::make_mutex("librbd::BlockGuard::m_lock");
   DetainedBlockExtentsPool m_detained_block_extent_pool;
   DetainedBlockExtents m_free_detained_block_extents;
   BlockExtentToDetainedBlockExtents m_detained_block_extents;
index 07f4e79044eaf064f91fa92877597e80d9d25394..ddbd34a7611870133ec26fdc4031286b697b40bd 100644 (file)
@@ -39,7 +39,7 @@ DeepCopyRequest<I>::DeepCopyRequest(I *src_image_ctx, I *dst_image_ctx,
     m_object_number(object_number), m_work_queue(work_queue),
     m_snap_seqs(snap_seqs), m_prog_ctx(prog_ctx), m_on_finish(on_finish),
     m_cct(dst_image_ctx->cct),
-    m_lock(unique_lock_name("DeepCopyRequest::m_lock", this)) {
+    m_lock(ceph::make_mutex(unique_lock_name("DeepCopyRequest::m_lock", this))) {
 }
 
 template <typename I>
@@ -61,7 +61,7 @@ void DeepCopyRequest<I>::send() {
 
 template <typename I>
 void DeepCopyRequest<I>::cancel() {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
 
   ldout(m_cct, 20) << dendl;
 
@@ -78,9 +78,9 @@ void DeepCopyRequest<I>::cancel() {
 
 template <typename I>
 void DeepCopyRequest<I>::send_copy_snapshots() {
-  m_lock.Lock();
+  m_lock.lock();
   if (m_canceled) {
-    m_lock.Unlock();
+    m_lock.unlock();
     finish(-ECANCELED);
     return;
   }
@@ -93,7 +93,7 @@ void DeepCopyRequest<I>::send_copy_snapshots() {
     m_src_image_ctx, m_dst_image_ctx, m_snap_id_end, m_flatten, m_work_queue,
     m_snap_seqs, ctx);
   m_snapshot_copy_request->get();
-  m_lock.Unlock();
+  m_lock.unlock();
 
   m_snapshot_copy_request->send();
 }
@@ -103,7 +103,7 @@ void DeepCopyRequest<I>::handle_copy_snapshots(int r) {
   ldout(m_cct, 20) << "r=" << r << dendl;
 
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     m_snapshot_copy_request->put();
     m_snapshot_copy_request = nullptr;
     if (r == 0 && m_canceled) {
@@ -131,9 +131,9 @@ void DeepCopyRequest<I>::handle_copy_snapshots(int r) {
 
 template <typename I>
 void DeepCopyRequest<I>::send_copy_image() {
-  m_lock.Lock();
+  m_lock.lock();
   if (m_canceled) {
-    m_lock.Unlock();
+    m_lock.unlock();
     finish(-ECANCELED);
     return;
   }
@@ -146,7 +146,7 @@ void DeepCopyRequest<I>::send_copy_image() {
       m_src_image_ctx, m_dst_image_ctx, m_snap_id_start, m_snap_id_end,
       m_flatten, m_object_number, *m_snap_seqs, m_prog_ctx, ctx);
   m_image_copy_request->get();
-  m_lock.Unlock();
+  m_lock.unlock();
 
   m_image_copy_request->send();
 }
@@ -156,7 +156,7 @@ void DeepCopyRequest<I>::handle_copy_image(int r) {
   ldout(m_cct, 20) << "r=" << r << dendl;
 
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     m_image_copy_request->put();
     m_image_copy_request = nullptr;
     if (r == 0 && m_canceled) {
@@ -179,19 +179,19 @@ void DeepCopyRequest<I>::handle_copy_image(int r) {
 
 template <typename I>
 void DeepCopyRequest<I>::send_copy_object_map() {
-  m_dst_image_ctx->owner_lock.get_read();
-  m_dst_image_ctx->image_lock.get_read();
+  m_dst_image_ctx->owner_lock.lock_shared();
+  m_dst_image_ctx->image_lock.lock_shared();
 
   if (!m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP,
                                       m_dst_image_ctx->image_lock)) {
-    m_dst_image_ctx->image_lock.put_read();
-    m_dst_image_ctx->owner_lock.put_read();
+    m_dst_image_ctx->image_lock.unlock_shared();
+    m_dst_image_ctx->owner_lock.unlock_shared();
     send_copy_metadata();
     return;
   }
   if (m_snap_id_end == CEPH_NOSNAP) {
-    m_dst_image_ctx->image_lock.put_read();
-    m_dst_image_ctx->owner_lock.put_read();
+    m_dst_image_ctx->image_lock.unlock_shared();
+    m_dst_image_ctx->owner_lock.unlock_shared();
     send_refresh_object_map();
     return;
   }
@@ -207,8 +207,8 @@ void DeepCopyRequest<I>::send_copy_object_map() {
   }
   if (finish_op_ctx == nullptr) {
     lderr(m_cct) << "lost exclusive lock" << dendl;
-    m_dst_image_ctx->image_lock.put_read();
-    m_dst_image_ctx->owner_lock.put_read();
+    m_dst_image_ctx->image_lock.unlock_shared();
+    m_dst_image_ctx->owner_lock.unlock_shared();
     finish(r);
     return;
   }
@@ -221,8 +221,8 @@ void DeepCopyRequest<I>::send_copy_object_map() {
   ceph_assert(m_snap_seqs->count(m_snap_id_end) > 0);
   librados::snap_t copy_snap_id = (*m_snap_seqs)[m_snap_id_end];
   m_dst_image_ctx->object_map->rollback(copy_snap_id, ctx);
-  m_dst_image_ctx->image_lock.put_read();
-  m_dst_image_ctx->owner_lock.put_read();
+  m_dst_image_ctx->image_lock.unlock_shared();
+  m_dst_image_ctx->owner_lock.unlock_shared();
 }
 
 template <typename I>
@@ -244,7 +244,7 @@ void DeepCopyRequest<I>::send_refresh_object_map() {
   int r;
   Context *finish_op_ctx = nullptr;
   {
-    RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+    std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
     if (m_dst_image_ctx->exclusive_lock != nullptr) {
       finish_op_ctx = m_dst_image_ctx->exclusive_lock->start_op(&r);
     }
@@ -279,7 +279,7 @@ void DeepCopyRequest<I>::handle_refresh_object_map(int r) {
   }
 
   {
-    RWLock::WLocker image_locker(m_dst_image_ctx->image_lock);
+    std::unique_lock image_locker{m_dst_image_ctx->image_lock};
     std::swap(m_dst_image_ctx->object_map, m_object_map);
   }
   delete m_object_map;
@@ -313,7 +313,7 @@ void DeepCopyRequest<I>::handle_copy_metadata(int r) {
 
 template <typename I>
 int DeepCopyRequest<I>::validate_copy_points() {
-  RWLock::RLocker image_locker(m_src_image_ctx->image_lock);
+  std::shared_lock image_locker{m_src_image_ctx->image_lock};
 
   if (m_snap_id_start != 0 &&
       m_src_image_ctx->snap_info.find(m_snap_id_start) ==
index dba07e6a862dc874d1840ec7bf31e7e70dcba0ab..77b87ddc94fdf54c74e26d49778f2f7eeec8da3c 100644 (file)
@@ -4,7 +4,7 @@
 #ifndef CEPH_LIBRBD_DEEP_COPY_REQUEST_H
 #define CEPH_LIBRBD_DEEP_COPY_REQUEST_H
 
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "common/RefCountedObj.h"
 #include "include/int_types.h"
 #include "librbd/ImageCtx.h"
@@ -96,7 +96,7 @@ private:
   Context *m_on_finish;
 
   CephContext *m_cct;
-  Mutex m_lock;
+  ceph::mutex m_lock;
   bool m_canceled = false;
 
   deep_copy::SnapshotCopyRequest<ImageCtxT> *m_snapshot_copy_request = nullptr;
index 71d98c5b2e6496625ea44c2c5809601fdfd8d5b3..3d3ae93ca3fb92033cddaaa33076d0d820aa6d89 100644 (file)
@@ -10,7 +10,7 @@
 #include "librbd/exclusive_lock/PreReleaseRequest.h"
 #include "librbd/io/ImageRequestWQ.h"
 #include "librbd/Utils.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "common/dout.h"
 
 #define dout_subsys ceph_subsys_rbd
@@ -32,13 +32,13 @@ ExclusiveLock<I>::ExclusiveLock(I &image_ctx)
           image_ctx.config.template get_val<bool>("rbd_blacklist_on_break_lock"),
           image_ctx.config.template get_val<uint64_t>("rbd_blacklist_expire_seconds")),
     m_image_ctx(image_ctx) {
-  Mutex::Locker locker(ML<I>::m_lock);
+  std::lock_guard locker{ML<I>::m_lock};
   ML<I>::set_state_uninitialized();
 }
 
 template <typename I>
 bool ExclusiveLock<I>::accept_requests(int *ret_val) const {
-  Mutex::Locker locker(ML<I>::m_lock);
+  std::lock_guard locker{ML<I>::m_lock};
 
   bool accept_requests = (!ML<I>::is_state_shutdown() &&
                           ML<I>::is_state_locked() &&
@@ -53,21 +53,21 @@ bool ExclusiveLock<I>::accept_requests(int *ret_val) const {
 
 template <typename I>
 bool ExclusiveLock<I>::accept_ops() const {
-  Mutex::Locker locker(ML<I>::m_lock);
+  std::lock_guard locker{ML<I>::m_lock};
   bool accept = accept_ops(ML<I>::m_lock);
   ldout(m_image_ctx.cct, 20) << "=" << accept << dendl;
   return accept;
 }
 
 template <typename I>
-bool ExclusiveLock<I>::accept_ops(const Mutex &lock) const {
+bool ExclusiveLock<I>::accept_ops(const ceph::mutex &lock) const {
   return (!ML<I>::is_state_shutdown() &&
           (ML<I>::is_state_locked() || ML<I>::is_state_post_acquiring()));
 }
 
 template <typename I>
 void ExclusiveLock<I>::block_requests(int r) {
-  Mutex::Locker locker(ML<I>::m_lock);
+  std::lock_guard locker{ML<I>::m_lock};
 
   m_request_blocked_count++;
   if (m_request_blocked_ret_val == 0) {
@@ -79,7 +79,7 @@ void ExclusiveLock<I>::block_requests(int r) {
 
 template <typename I>
 void ExclusiveLock<I>::unblock_requests() {
-  Mutex::Locker locker(ML<I>::m_lock);
+  std::lock_guard locker{ML<I>::m_lock};
 
   ceph_assert(m_request_blocked_count > 0);
   m_request_blocked_count--;
@@ -100,11 +100,11 @@ int ExclusiveLock<I>::get_unlocked_op_error() const {
 
 template <typename I>
 void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ldout(m_image_ctx.cct, 10) << dendl;
 
   {
-    Mutex::Locker locker(ML<I>::m_lock);
+    std::lock_guard locker{ML<I>::m_lock};
     ML<I>::set_state_initializing();
   }
 
@@ -124,7 +124,7 @@ void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
 
 template <typename I>
 void ExclusiveLock<I>::handle_peer_notification(int r) {
-  Mutex::Locker locker(ML<I>::m_lock);
+  std::lock_guard locker{ML<I>::m_lock};
   if (!ML<I>::is_state_waiting_for_lock()) {
     return;
   }
@@ -138,8 +138,8 @@ void ExclusiveLock<I>::handle_peer_notification(int r) {
 
 template <typename I>
 Context *ExclusiveLock<I>::start_op(int* ret_val) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
-  Mutex::Locker locker(ML<I>::m_lock);
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+  std::lock_guard locker{ML<I>::m_lock};
 
   if (!accept_ops(ML<I>::m_lock)) {
     *ret_val = get_unlocked_op_error();
@@ -157,7 +157,7 @@ void ExclusiveLock<I>::handle_init_complete(uint64_t features) {
   ldout(m_image_ctx.cct, 10) << ": features=" << features << dendl;
 
   {
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+    std::shared_lock owner_locker{m_image_ctx.owner_lock};
     if (m_image_ctx.clone_copy_on_read ||
         (features & RBD_FEATURE_JOURNALING) != 0) {
       m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, true);
@@ -166,7 +166,7 @@ void ExclusiveLock<I>::handle_init_complete(uint64_t features) {
     }
   }
 
-  Mutex::Locker locker(ML<I>::m_lock);
+  std::lock_guard locker{ML<I>::m_lock};
   ML<I>::set_state_unlocked();
 }
 
@@ -175,7 +175,7 @@ void ExclusiveLock<I>::shutdown_handler(int r, Context *on_finish) {
   ldout(m_image_ctx.cct, 10) << dendl;
 
   {
-    RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
+    std::unique_lock owner_locker{m_image_ctx.owner_lock};
     m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
     m_image_ctx.exclusive_lock = nullptr;
   }
@@ -190,7 +190,7 @@ void ExclusiveLock<I>::pre_acquire_lock_handler(Context *on_finish) {
 
   int acquire_lock_peer_ret_val = 0;
   {
-    Mutex::Locker locker(ML<I>::m_lock);
+    std::lock_guard locker{ML<I>::m_lock};
     std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val);
   }
 
@@ -216,7 +216,7 @@ void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
     on_finish->complete(r);
     return;
   } else if (r < 0) {
-    ML<I>::m_lock.Lock();
+    ML<I>::m_lock.lock();
     ceph_assert(ML<I>::is_state_acquiring());
 
     // PostAcquire state machine will not run, so we need complete prepare
@@ -225,7 +225,7 @@ void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
     // if lock is in-use by another client, request the lock
     if (ML<I>::is_action_acquire_lock() && (r == -EBUSY || r == -EAGAIN)) {
       ML<I>::set_state_waiting_for_lock();
-      ML<I>::m_lock.Unlock();
+      ML<I>::m_lock.unlock();
 
       // request the lock from a peer
       m_image_ctx.image_watcher->notify_request_lock();
@@ -233,7 +233,7 @@ void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
       // inform manage lock that we have interrupted the state machine
       r = -ECANCELED;
     } else {
-      ML<I>::m_lock.Unlock();
+      ML<I>::m_lock.unlock();
 
       // clear error if peer owns lock
       if (r == -EAGAIN) {
@@ -245,7 +245,7 @@ void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
     return;
   }
 
-  Mutex::Locker locker(ML<I>::m_lock);
+  std::lock_guard locker{ML<I>::m_lock};
   m_pre_post_callback = on_finish;
   using EL = ExclusiveLock<I>;
   PostAcquireRequest<I> *req = PostAcquireRequest<I>::create(m_image_ctx,
@@ -261,7 +261,7 @@ template <typename I>
 void ExclusiveLock<I>::handle_post_acquiring_lock(int r) {
   ldout(m_image_ctx.cct, 10) << dendl;
 
-  Mutex::Locker locker(ML<I>::m_lock);
+  std::lock_guard locker{ML<I>::m_lock};
 
   ceph_assert(r == 0);
 
@@ -275,7 +275,7 @@ void ExclusiveLock<I>::handle_post_acquired_lock(int r) {
 
   Context *on_finish = nullptr;
   {
-    Mutex::Locker locker(ML<I>::m_lock);
+    std::lock_guard locker{ML<I>::m_lock};
     ceph_assert(ML<I>::is_state_acquiring() || ML<I>::is_state_post_acquiring());
 
     assert (m_pre_post_callback != nullptr);
@@ -297,7 +297,7 @@ template <typename I>
 void ExclusiveLock<I>::pre_release_lock_handler(bool shutting_down,
                                                 Context *on_finish) {
   ldout(m_image_ctx.cct, 10) << dendl;
-  Mutex::Locker locker(ML<I>::m_lock);
+  std::lock_guard locker{ML<I>::m_lock};
 
   PreReleaseRequest<I> *req = PreReleaseRequest<I>::create(
     m_image_ctx, shutting_down, m_async_op_tracker, on_finish);
@@ -313,7 +313,7 @@ void ExclusiveLock<I>::post_release_lock_handler(bool shutting_down, int r,
                              << shutting_down << dendl;
   if (!shutting_down) {
     {
-      Mutex::Locker locker(ML<I>::m_lock);
+      std::lock_guard locker{ML<I>::m_lock};
       ceph_assert(ML<I>::is_state_pre_releasing() || ML<I>::is_state_releasing());
     }
 
@@ -322,7 +322,7 @@ void ExclusiveLock<I>::post_release_lock_handler(bool shutting_down, int r,
     }
   } else {
     {
-      RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
+      std::unique_lock owner_locker{m_image_ctx.owner_lock};
       m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
       m_image_ctx.exclusive_lock = nullptr;
     }
index 8b2a411f44232debfed2996337bf48b0ec1789f5..233d632a216da0323c498860e6af201d717c4d65 100644 (file)
@@ -94,7 +94,7 @@ private:
 
   int m_acquire_lock_peer_ret_val = 0;
 
-  bool accept_ops(const Mutex &lock) const;
+  bool accept_ops(const ceph::mutex &lock) const;
 
   void handle_init_complete(uint64_t features);
   void handle_post_acquiring_lock(int r);
index 8c5a07eacaf53c207cd2c9c8aeab45fb1f9b2295..b5e0390b3654a7e51c715d7212ddaf572bfbaf00 100644 (file)
@@ -76,15 +76,14 @@ public:
 
 class SafeTimerSingleton : public SafeTimer {
 public:
-  Mutex lock;
+  ceph::mutex lock = ceph::make_mutex("librbd::Journal::SafeTimerSingleton::lock");
 
   explicit SafeTimerSingleton(CephContext *cct)
-      : SafeTimer(cct, lock, true),
-        lock("librbd::Journal::SafeTimerSingleton::lock") {
+      : SafeTimer(cct, lock, true) {
     init();
   }
   ~SafeTimerSingleton() {
-    Mutex::Locker locker(lock);
+    std::lock_guard locker{lock};
     shutdown();
   }
 };
@@ -105,11 +104,11 @@ public:
       name(image_name),
       image_watcher(NULL),
       journal(NULL),
-      owner_lock(util::unique_lock_name("librbd::ImageCtx::owner_lock", this)),
-      image_lock(util::unique_lock_name("librbd::ImageCtx::image_lock", this)),
-      timestamp_lock(util::unique_lock_name("librbd::ImageCtx::timestamp_lock", this)),
-      async_ops_lock(util::unique_lock_name("librbd::ImageCtx::async_ops_lock", this)),
-      copyup_list_lock(util::unique_lock_name("librbd::ImageCtx::copyup_list_lock", this)),
+      owner_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ImageCtx::owner_lock", this))),
+      image_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ImageCtx::image_lock", this))),
+      timestamp_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ImageCtx::timestamp_lock", this))),
+      async_ops_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageCtx::async_ops_lock", this))),
+      copyup_list_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageCtx::copyup_list_lock", this))),
       extra_read_flags(0),
       old_format(false),
       order(0), size(0), features(0),
@@ -319,7 +318,7 @@ public:
   }
 
   int ImageCtx::snap_set(uint64_t in_snap_id) {
-    ceph_assert(image_lock.is_wlocked());
+    ceph_assert(ceph_mutex_is_wlocked(image_lock));
     auto it = snap_info.find(in_snap_id);
     if (in_snap_id != CEPH_NOSNAP && it != snap_info.end()) {
       snap_id = in_snap_id;
@@ -334,7 +333,7 @@ public:
 
   void ImageCtx::snap_unset()
   {
-    ceph_assert(image_lock.is_wlocked());
+    ceph_assert(ceph_mutex_is_wlocked(image_lock));
     snap_id = CEPH_NOSNAP;
     snap_namespace = {};
     snap_name = "";
@@ -345,7 +344,7 @@ public:
   snap_t ImageCtx::get_snap_id(const cls::rbd::SnapshotNamespace& in_snap_namespace,
                                const string& in_snap_name) const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     auto it = snap_ids.find({in_snap_namespace, in_snap_name});
     if (it != snap_ids.end()) {
       return it->second;
@@ -355,7 +354,7 @@ public:
 
   const SnapInfo* ImageCtx::get_snap_info(snap_t in_snap_id) const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     map<snap_t, SnapInfo>::const_iterator it =
       snap_info.find(in_snap_id);
     if (it != snap_info.end())
@@ -366,7 +365,7 @@ public:
   int ImageCtx::get_snap_name(snap_t in_snap_id,
                              string *out_snap_name) const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     const SnapInfo *info = get_snap_info(in_snap_id);
     if (info) {
       *out_snap_name = info->name;
@@ -378,7 +377,7 @@ public:
   int ImageCtx::get_snap_namespace(snap_t in_snap_id,
                                   cls::rbd::SnapshotNamespace *out_snap_namespace) const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     const SnapInfo *info = get_snap_info(in_snap_id);
     if (info) {
       *out_snap_namespace = info->snap_namespace;
@@ -400,7 +399,7 @@ public:
 
   uint64_t ImageCtx::get_current_size() const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     return size;
   }
 
@@ -445,20 +444,20 @@ public:
 
   void ImageCtx::set_access_timestamp(utime_t at)
   {
-    ceph_assert(timestamp_lock.is_wlocked());
+    ceph_assert(ceph_mutex_is_wlocked(timestamp_lock));
     access_timestamp = at;
   }
 
   void ImageCtx::set_modify_timestamp(utime_t mt)
   {
-    ceph_assert(timestamp_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(timestamp_lock));
     modify_timestamp = mt;
   }
 
   int ImageCtx::is_snap_protected(snap_t in_snap_id,
                                  bool *is_protected) const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     const SnapInfo *info = get_snap_info(in_snap_id);
     if (info) {
       *is_protected =
@@ -471,7 +470,7 @@ public:
   int ImageCtx::is_snap_unprotected(snap_t in_snap_id,
                                    bool *is_unprotected) const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     const SnapInfo *info = get_snap_info(in_snap_id);
     if (info) {
       *is_unprotected =
@@ -488,7 +487,7 @@ public:
                           uint8_t protection_status, uint64_t flags,
                           utime_t timestamp)
   {
-    ceph_assert(image_lock.is_wlocked());
+    ceph_assert(ceph_mutex_is_wlocked(image_lock));
     snaps.push_back(id);
     SnapInfo info(in_snap_name, in_snap_namespace,
                  in_size, parent, protection_status, flags, timestamp);
@@ -500,7 +499,7 @@ public:
                         string in_snap_name,
                         snap_t id)
   {
-    ceph_assert(image_lock.is_wlocked());
+    ceph_assert(ceph_mutex_is_wlocked(image_lock));
     snaps.erase(std::remove(snaps.begin(), snaps.end(), id), snaps.end());
     snap_info.erase(id);
     snap_ids.erase({in_snap_namespace, in_snap_name});
@@ -508,7 +507,7 @@ public:
 
   uint64_t ImageCtx::get_image_size(snap_t in_snap_id) const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     if (in_snap_id == CEPH_NOSNAP) {
       if (!resize_reqs.empty() &&
           resize_reqs.front()->shrinking()) {
@@ -525,40 +524,40 @@ public:
   }
 
   uint64_t ImageCtx::get_object_count(snap_t in_snap_id) const {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     uint64_t image_size = get_image_size(in_snap_id);
     return Striper::get_num_objects(layout, image_size);
   }
 
   bool ImageCtx::test_features(uint64_t features) const
   {
-    RWLock::RLocker l(image_lock);
+    std::shared_lock l{image_lock};
     return test_features(features, image_lock);
   }
 
   bool ImageCtx::test_features(uint64_t in_features,
-                               const RWLock &in_image_lock) const
+                               const ceph::shared_mutex &in_image_lock) const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     return ((features & in_features) == in_features);
   }
 
   bool ImageCtx::test_op_features(uint64_t in_op_features) const
   {
-    RWLock::RLocker image_locker(image_lock);
+    std::shared_lock l{image_lock};
     return test_op_features(in_op_features, image_lock);
   }
 
   bool ImageCtx::test_op_features(uint64_t in_op_features,
-                                  const RWLock &in_image_lock) const
+                                  const ceph::shared_mutex &in_image_lock) const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     return ((op_features & in_op_features) == in_op_features);
   }
 
   int ImageCtx::get_flags(librados::snap_t _snap_id, uint64_t *_flags) const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     if (_snap_id == CEPH_NOSNAP) {
       *_flags = flags;
       return 0;
@@ -574,15 +573,16 @@ public:
   int ImageCtx::test_flags(librados::snap_t in_snap_id,
                            uint64_t flags, bool *flags_set) const
   {
-    RWLock::RLocker l(image_lock);
+    std::shared_lock l{image_lock};
     return test_flags(in_snap_id, flags, image_lock, flags_set);
   }
 
   int ImageCtx::test_flags(librados::snap_t in_snap_id,
-                           uint64_t flags, const RWLock &in_image_lock,
+                           uint64_t flags,
+                           const ceph::shared_mutex &in_image_lock,
                            bool *flags_set) const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     uint64_t snap_flags;
     int r = get_flags(in_snap_id, &snap_flags);
     if (r < 0) {
@@ -594,7 +594,7 @@ public:
 
   int ImageCtx::update_flags(snap_t in_snap_id, uint64_t flag, bool enabled)
   {
-    ceph_assert(image_lock.is_wlocked());
+    ceph_assert(ceph_mutex_is_wlocked(image_lock));
     uint64_t *_flags;
     if (in_snap_id == CEPH_NOSNAP) {
       _flags = &flags;
@@ -616,7 +616,7 @@ public:
 
   const ParentImageInfo* ImageCtx::get_parent_info(snap_t in_snap_id) const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     if (in_snap_id == CEPH_NOSNAP)
       return &parent_md;
     const SnapInfo *info = get_snap_info(in_snap_id);
@@ -651,7 +651,7 @@ public:
 
   int ImageCtx::get_parent_overlap(snap_t in_snap_id, uint64_t *overlap) const
   {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     const auto info = get_parent_info(in_snap_id);
     if (info) {
       *overlap = info->overlap;
@@ -695,7 +695,7 @@ public:
 
   void ImageCtx::cancel_async_requests(Context *on_finish) {
     {
-      Mutex::Locker async_ops_locker(async_ops_lock);
+      std::lock_guard async_ops_locker{async_ops_lock};
       if (!async_requests.empty()) {
         ldout(cct, 10) << "canceling async requests: count="
                        << async_requests.size() << dendl;
@@ -823,8 +823,8 @@ public:
 
   void ImageCtx::set_image_name(const std::string &image_name) {
     // update the name so rename can be invoked repeatedly
-    RWLock::RLocker owner_locker(owner_lock);
-    RWLock::WLocker image_locker(image_lock);
+    std::shared_lock owner_locker{owner_lock};
+    std::unique_lock image_locker{image_lock};
     name = image_name;
     if (old_format) {
       header_oid = util::old_header_name(image_name);
@@ -842,26 +842,26 @@ public:
   }
 
   exclusive_lock::Policy *ImageCtx::get_exclusive_lock_policy() const {
-    ceph_assert(owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(owner_lock));
     ceph_assert(exclusive_lock_policy != nullptr);
     return exclusive_lock_policy;
   }
 
   void ImageCtx::set_exclusive_lock_policy(exclusive_lock::Policy *policy) {
-    ceph_assert(owner_lock.is_wlocked());
+    ceph_assert(ceph_mutex_is_wlocked(owner_lock));
     ceph_assert(policy != nullptr);
     delete exclusive_lock_policy;
     exclusive_lock_policy = policy;
   }
 
   journal::Policy *ImageCtx::get_journal_policy() const {
-    ceph_assert(image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_lock));
     ceph_assert(journal_policy != nullptr);
     return journal_policy;
   }
 
   void ImageCtx::set_journal_policy(journal::Policy *policy) {
-    ceph_assert(image_lock.is_wlocked());
+    ceph_assert(ceph_mutex_is_wlocked(image_lock));
     ceph_assert(policy != nullptr);
     delete journal_policy;
     journal_policy = policy;
@@ -878,7 +878,7 @@ public:
   }
 
   void ImageCtx::get_timer_instance(CephContext *cct, SafeTimer **timer,
-                                    Mutex **timer_lock) {
+                                    ceph::mutex **timer_lock) {
     auto safe_timer_singleton =
       &cct->lookup_or_create_singleton_object<SafeTimerSingleton>(
        "librbd::journal::safe_timer", false, cct);
index 2ae215fcab192c293c486070755db59dd1ba0f79..84a5dbec2edf135f171058754859f81ed72aa26e 100644 (file)
 #include <vector>
 
 #include "common/allocator.h"
+#include "common/ceph_mutex.h"
 #include "common/config_proxy.h"
 #include "common/event_socket.h"
-#include "common/Mutex.h"
 #include "common/Readahead.h"
-#include "common/RWLock.h"
 #include "common/snap_types.h"
 #include "common/zipkin_trace.h"
 
@@ -105,8 +104,8 @@ namespace librbd {
      * owner_lock, image_lock
      * async_op_lock, timestamp_lock
      */
-    RWLock owner_lock; // protects exclusive lock leadership updates
-    RWLock image_lock; // protects snapshot-related member variables,
+    ceph::shared_mutex owner_lock; // protects exclusive lock leadership updates
+    mutable ceph::shared_mutex image_lock; // protects snapshot-related member variables,
                        // features (and associated helper classes), and flags
                        // protects access to the mutable image metadata that
                        // isn't guarded by other locks below, and blocks writes
@@ -119,9 +118,9 @@ namespace librbd {
                        // object_map
                        // parent_md and parent
 
-    RWLock timestamp_lock; // protects (create/access/modify)_timestamp
-    Mutex async_ops_lock; // protects async_ops and async_requests
-    Mutex copyup_list_lock; // protects copyup_waiting_list
+    ceph::shared_mutex timestamp_lock; // protects (create/access/modify)_timestamp
+    ceph::mutex async_ops_lock; // protects async_ops and async_requests
+    ceph::mutex copyup_list_lock; // protects copyup_waiting_list
 
     unsigned extra_read_flags;
 
@@ -281,15 +280,15 @@ namespace librbd {
     uint64_t get_object_count(librados::snap_t in_snap_id) const;
     bool test_features(uint64_t test_features) const;
     bool test_features(uint64_t test_features,
-                       const RWLock &in_image_lock) const;
+                       const ceph::shared_mutex &in_image_lock) const;
     bool test_op_features(uint64_t op_features) const;
     bool test_op_features(uint64_t op_features,
-                          const RWLock &in_image_lock) const;
+                          const ceph::shared_mutex &in_image_lock) const;
     int get_flags(librados::snap_t in_snap_id, uint64_t *flags) const;
     int test_flags(librados::snap_t in_snap_id,
                    uint64_t test_flags, bool *flags_set) const;
     int test_flags(librados::snap_t in_snap_id,
-                   uint64_t test_flags, const RWLock &in_image_lock,
+                   uint64_t test_flags, const ceph::shared_mutex &in_image_lock,
                    bool *flags_set) const;
     int update_flags(librados::snap_t in_snap_id, uint64_t flag, bool enabled);
 
@@ -328,7 +327,7 @@ namespace librbd {
                                          ThreadPool **thread_pool,
                                          ContextWQ **op_work_queue);
     static void get_timer_instance(CephContext *cct, SafeTimer **timer,
-                                   Mutex **timer_lock);
+                                   ceph::mutex **timer_lock);
   };
 }
 
index 1f0535e25c823704b9d63b1b0ebb8dd055bd703d..acdfaeea9cde8c62a388052342ff89cda7e06549 100644 (file)
@@ -27,7 +27,7 @@ class ImageUpdateWatchers {
 public:
 
   explicit ImageUpdateWatchers(CephContext *cct) : m_cct(cct),
-    m_lock(util::unique_lock_name("librbd::ImageUpdateWatchers::m_lock", this)) {
+    m_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageUpdateWatchers::m_lock", this))) {
   }
 
   ~ImageUpdateWatchers() {
@@ -42,7 +42,7 @@ public:
   void flush(Context *on_finish) {
     ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl;
     {
-      Mutex::Locker locker(m_lock);
+      std::lock_guard locker{m_lock};
       if (!m_in_flight.empty()) {
        Context *ctx = new FunctionContext(
          [this, on_finish](int r) {
@@ -62,7 +62,7 @@ public:
   void shut_down(Context *on_finish) {
     ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl;
     {
-      Mutex::Locker locker(m_lock);
+      std::lock_guard locker{m_lock};
       ceph_assert(m_on_shut_down_finish == nullptr);
       m_watchers.clear();
       if (!m_in_flight.empty()) {
@@ -78,7 +78,7 @@ public:
   void register_watcher(UpdateWatchCtx *watcher, uint64_t *handle) {
     ldout(m_cct, 20) << __func__ << ": watcher=" << watcher << dendl;
 
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ceph_assert(m_on_shut_down_finish == nullptr);
 
     create_work_queue();
@@ -92,7 +92,7 @@ public:
                     << handle << dendl;
     int r = 0;
     {
-      Mutex::Locker locker(m_lock);
+      std::lock_guard locker{m_lock};
       auto it = m_watchers.find(handle);
       if (it == m_watchers.end()) {
        r = -ENOENT;
@@ -116,14 +116,14 @@ public:
   void notify() {
     ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl;
 
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     for (auto it : m_watchers) {
       send_notify(it.first, it.second);
     }
   }
 
   void send_notify(uint64_t handle, UpdateWatchCtx *watcher) {
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
 
     ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": handle="
                     << handle << ", watcher=" << watcher << dendl;
@@ -149,7 +149,7 @@ public:
     Context *on_shut_down_finish = nullptr;
 
     {
-      Mutex::Locker locker(m_lock);
+      std::lock_guard locker{m_lock};
 
       auto in_flight_it = m_in_flight.find(handle);
       ceph_assert(in_flight_it != m_in_flight.end());
@@ -200,7 +200,7 @@ private:
   };
 
   CephContext *m_cct;
-  Mutex m_lock;
+  ceph::mutex m_lock;
   ContextWQ *m_work_queue = nullptr;
   std::map<uint64_t, UpdateWatchCtx*> m_watchers;
   uint64_t m_next_handle = 0;
@@ -232,7 +232,7 @@ private:
 template <typename I>
 ImageState<I>::ImageState(I *image_ctx)
   : m_image_ctx(image_ctx), m_state(STATE_UNINITIALIZED),
-    m_lock(util::unique_lock_name("librbd::ImageState::m_lock", this)),
+    m_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageState::m_lock", this))),
     m_last_refresh(0), m_refresh_seq(0),
     m_update_watchers(new ImageUpdateWatchers(image_ctx->cct)) {
 }
@@ -260,7 +260,7 @@ void ImageState<I>::open(uint64_t flags, Context *on_finish) {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 20) << __func__ << dendl;
 
-  m_lock.Lock();
+  m_lock.lock();
   ceph_assert(m_state == STATE_UNINITIALIZED);
   m_open_flags = flags;
 
@@ -285,7 +285,7 @@ void ImageState<I>::close(Context *on_finish) {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 20) << __func__ << dendl;
 
-  m_lock.Lock();
+  m_lock.lock();
   ceph_assert(!is_closed());
 
   Action action(ACTION_TYPE_CLOSE);
@@ -295,7 +295,7 @@ void ImageState<I>::close(Context *on_finish) {
 
 template <typename I>
 void ImageState<I>::handle_update_notification() {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ++m_refresh_seq;
 
   CephContext *cct = m_image_ctx->cct;
@@ -309,7 +309,7 @@ void ImageState<I>::handle_update_notification() {
 
 template <typename I>
 bool ImageState<I>::is_refresh_required() const {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   return (m_last_refresh != m_refresh_seq || find_pending_refresh() != nullptr);
 }
 
@@ -325,9 +325,9 @@ void ImageState<I>::refresh(Context *on_finish) {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 20) << __func__ << dendl;
 
-  m_lock.Lock();
+  m_lock.lock();
   if (is_closed()) {
-    m_lock.Unlock();
+    m_lock.unlock();
     on_finish->complete(-ESHUTDOWN);
     return;
   }
@@ -341,7 +341,7 @@ template <typename I>
 int ImageState<I>::refresh_if_required() {
   C_SaferCond ctx;
   {
-    m_lock.Lock();
+    m_lock.lock();
     Action action(ACTION_TYPE_REFRESH);
     action.refresh_seq = m_refresh_seq;
 
@@ -350,10 +350,10 @@ int ImageState<I>::refresh_if_required() {
       // if a refresh is in-flight, delay until it is finished
       action = *refresh_action;
     } else if (m_last_refresh == m_refresh_seq) {
-      m_lock.Unlock();
+      m_lock.unlock();
       return 0;
     } else if (is_closed()) {
-      m_lock.Unlock();
+      m_lock.unlock();
       return -ESHUTDOWN;
     }
 
@@ -366,7 +366,7 @@ int ImageState<I>::refresh_if_required() {
 template <typename I>
 const typename ImageState<I>::Action *
 ImageState<I>::find_pending_refresh() const {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   auto it = std::find_if(m_actions_contexts.rbegin(),
                          m_actions_contexts.rend(),
@@ -387,7 +387,7 @@ void ImageState<I>::snap_set(uint64_t snap_id, Context *on_finish) {
   Action action(ACTION_TYPE_SET_SNAP);
   action.snap_id = snap_id;
 
-  m_lock.Lock();
+  m_lock.lock();
   execute_action_unlock(action, on_finish);
 }
 
@@ -396,9 +396,9 @@ void ImageState<I>::prepare_lock(Context *on_ready) {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << __func__ << dendl;
 
-  m_lock.Lock();
+  m_lock.lock();
   if (is_closed()) {
-    m_lock.Unlock();
+    m_lock.unlock();
     on_ready->complete(-ESHUTDOWN);
     return;
   }
@@ -413,9 +413,9 @@ void ImageState<I>::handle_prepare_lock_complete() {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << __func__ << dendl;
 
-  m_lock.Lock();
+  m_lock.lock();
   if (m_state != STATE_PREPARING_LOCK) {
-    m_lock.Unlock();
+    m_lock.unlock();
     return;
   }
 
@@ -479,7 +479,7 @@ bool ImageState<I>::is_transition_state() const {
 
 template <typename I>
 bool ImageState<I>::is_closed() const {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   return ((m_state == STATE_CLOSED) ||
           (!m_actions_contexts.empty() &&
@@ -488,7 +488,7 @@ bool ImageState<I>::is_closed() const {
 
 template <typename I>
 void ImageState<I>::append_context(const Action &action, Context *context) {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   ActionContexts *action_contexts = nullptr;
   for (auto &action_ctxs : m_actions_contexts) {
@@ -510,7 +510,7 @@ void ImageState<I>::append_context(const Action &action, Context *context) {
 
 template <typename I>
 void ImageState<I>::execute_next_action_unlock() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   ceph_assert(!m_actions_contexts.empty());
   switch (m_actions_contexts.front().first.action_type) {
   case ACTION_TYPE_OPEN:
@@ -535,44 +535,44 @@ void ImageState<I>::execute_next_action_unlock() {
 template <typename I>
 void ImageState<I>::execute_action_unlock(const Action &action,
                                           Context *on_finish) {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   append_context(action, on_finish);
   if (!is_transition_state()) {
     execute_next_action_unlock();
   } else {
-    m_lock.Unlock();
+    m_lock.unlock();
   }
 }
 
 template <typename I>
 void ImageState<I>::complete_action_unlock(State next_state, int r) {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   ceph_assert(!m_actions_contexts.empty());
 
   ActionContexts action_contexts(std::move(m_actions_contexts.front()));
   m_actions_contexts.pop_front();
 
   m_state = next_state;
-  m_lock.Unlock();
+  m_lock.unlock();
 
   for (auto ctx : action_contexts.second) {
     ctx->complete(r);
   }
 
   if (next_state != STATE_UNINITIALIZED && next_state != STATE_CLOSED) {
-    m_lock.Lock();
+    m_lock.lock();
     if (!is_transition_state() && !m_actions_contexts.empty()) {
       execute_next_action_unlock();
     } else {
-      m_lock.Unlock();
+      m_lock.unlock();
     }
   }
 }
 
 template <typename I>
 void ImageState<I>::send_open_unlock() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << this << " " << __func__ << dendl;
 
@@ -584,7 +584,7 @@ void ImageState<I>::send_open_unlock() {
   image::OpenRequest<I> *req = image::OpenRequest<I>::create(
     m_image_ctx, m_open_flags, ctx);
 
-  m_lock.Unlock();
+  m_lock.unlock();
   req->send();
 }
 
@@ -597,13 +597,13 @@ void ImageState<I>::handle_open(int r) {
     lderr(cct) << "failed to open image: " << cpp_strerror(r) << dendl;
   }
 
-  m_lock.Lock();
+  m_lock.lock();
   complete_action_unlock(r < 0 ? STATE_UNINITIALIZED : STATE_OPEN, r);
 }
 
 template <typename I>
 void ImageState<I>::send_close_unlock() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << this << " " << __func__ << dendl;
 
@@ -614,7 +614,7 @@ void ImageState<I>::send_close_unlock() {
   image::CloseRequest<I> *req = image::CloseRequest<I>::create(
     m_image_ctx, ctx);
 
-  m_lock.Unlock();
+  m_lock.unlock();
   req->send();
 }
 
@@ -628,13 +628,13 @@ void ImageState<I>::handle_close(int r) {
                << dendl;
   }
 
-  m_lock.Lock();
+  m_lock.lock();
   complete_action_unlock(STATE_CLOSED, r);
 }
 
 template <typename I>
 void ImageState<I>::send_refresh_unlock() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << this << " " << __func__ << dendl;
 
@@ -649,7 +649,7 @@ void ImageState<I>::send_refresh_unlock() {
   image::RefreshRequest<I> *req = image::RefreshRequest<I>::create(
     *m_image_ctx, false, false, ctx);
 
-  m_lock.Unlock();
+  m_lock.unlock();
   req->send();
 }
 
@@ -658,7 +658,7 @@ void ImageState<I>::handle_refresh(int r) {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
 
-  m_lock.Lock();
+  m_lock.lock();
   ceph_assert(!m_actions_contexts.empty());
 
   ActionContexts &action_contexts(m_actions_contexts.front());
@@ -677,7 +677,7 @@ void ImageState<I>::handle_refresh(int r) {
 
 template <typename I>
 void ImageState<I>::send_set_snap_unlock() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   m_state = STATE_SETTING_SNAP;
 
@@ -695,7 +695,7 @@ void ImageState<I>::send_set_snap_unlock() {
   image::SetSnapRequest<I> *req = image::SetSnapRequest<I>::create(
     *m_image_ctx, action_contexts.first.snap_id, ctx);
 
-  m_lock.Unlock();
+  m_lock.unlock();
   req->send();
 }
 
@@ -708,7 +708,7 @@ void ImageState<I>::handle_set_snap(int r) {
     lderr(cct) << "failed to set snapshot: " << cpp_strerror(r) << dendl;
   }
 
-  m_lock.Lock();
+  m_lock.lock();
   complete_action_unlock(STATE_OPEN, r);
 }
 
@@ -717,7 +717,7 @@ void ImageState<I>::send_prepare_lock_unlock() {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << this << " " << __func__ << dendl;
 
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   m_state = STATE_PREPARING_LOCK;
 
   ceph_assert(!m_actions_contexts.empty());
@@ -725,7 +725,7 @@ void ImageState<I>::send_prepare_lock_unlock() {
   ceph_assert(action_contexts.first.action_type == ACTION_TYPE_LOCK);
 
   Context *on_ready = action_contexts.first.on_ready;
-  m_lock.Unlock();
+  m_lock.unlock();
 
   if (on_ready == nullptr) {
     complete_action_unlock(STATE_OPEN, 0);
index 7f28d1eec712e2e4c0df6d220d5abefc3b788274..9daa5137ed9ea5262919f4d2e164c419b0d49d51 100644 (file)
@@ -5,7 +5,7 @@
 #define CEPH_LIBRBD_IMAGE_STATE_H
 
 #include "include/int_types.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include <list>
 #include <string>
 #include <utility>
@@ -102,7 +102,7 @@ private:
   ImageCtxT *m_image_ctx;
   State m_state;
 
-  mutable Mutex m_lock;
+  mutable ceph::mutex m_lock;
   ActionsContexts m_actions_contexts;
 
   uint64_t m_last_refresh;
index 055aec196a148aae8e142a3ccb95989d9ecb74c5..da5a39fdf845034d5afef405571d492cb828ac6d 100644 (file)
@@ -68,8 +68,10 @@ ImageWatcher<I>::ImageWatcher(I &image_ctx)
   : Watcher(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid),
     m_image_ctx(image_ctx),
     m_task_finisher(new TaskFinisher<Task>(*m_image_ctx.cct)),
-    m_async_request_lock(util::unique_lock_name("librbd::ImageWatcher::m_async_request_lock", this)),
-    m_owner_client_id_lock(util::unique_lock_name("librbd::ImageWatcher::m_owner_client_id_lock", this))
+    m_async_request_lock(ceph::make_shared_mutex(
+      util::unique_lock_name("librbd::ImageWatcher::m_async_request_lock", this))),
+    m_owner_client_id_lock(ceph::make_mutex(
+      util::unique_lock_name("librbd::ImageWatcher::m_owner_client_id_lock", this)))
 {
 }
 
@@ -156,7 +158,7 @@ void ImageWatcher<I>::handle_async_complete(const AsyncRequestId &request,
       schedule_async_complete(request, r);
     }
   } else {
-    RWLock::WLocker async_request_locker(m_async_request_lock);
+    std::unique_lock async_request_locker{m_async_request_lock};
     m_async_pending.erase(request);
   }
 }
@@ -165,7 +167,7 @@ template <typename I>
 void ImageWatcher<I>::notify_flatten(uint64_t request_id,
                                      ProgressContext &prog_ctx,
                                      Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock &&
               !m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -180,7 +182,7 @@ void ImageWatcher<I>::notify_resize(uint64_t request_id, uint64_t size,
                                    bool allow_shrink,
                                     ProgressContext &prog_ctx,
                                     Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock &&
               !m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -195,7 +197,7 @@ template <typename I>
 void ImageWatcher<I>::notify_snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
                                         const std::string &snap_name,
                                          Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock &&
               !m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -206,7 +208,7 @@ template <typename I>
 void ImageWatcher<I>::notify_snap_rename(const snapid_t &src_snap_id,
                                         const std::string &dst_snap_name,
                                         Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock &&
               !m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -217,7 +219,7 @@ template <typename I>
 void ImageWatcher<I>::notify_snap_remove(const cls::rbd::SnapshotNamespace &snap_namespace,
                                         const std::string &snap_name,
                                          Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock &&
               !m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -228,7 +230,7 @@ template <typename I>
 void ImageWatcher<I>::notify_snap_protect(const cls::rbd::SnapshotNamespace &snap_namespace,
                                          const std::string &snap_name,
                                           Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock &&
               !m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -239,7 +241,7 @@ template <typename I>
 void ImageWatcher<I>::notify_snap_unprotect(const cls::rbd::SnapshotNamespace &snap_namespace,
                                            const std::string &snap_name,
                                             Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock &&
               !m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -250,7 +252,7 @@ template <typename I>
 void ImageWatcher<I>::notify_rebuild_object_map(uint64_t request_id,
                                                 ProgressContext &prog_ctx,
                                                 Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock &&
               !m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -264,7 +266,7 @@ void ImageWatcher<I>::notify_rebuild_object_map(uint64_t request_id,
 template <typename I>
 void ImageWatcher<I>::notify_rename(const std::string &image_name,
                                     Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock &&
               !m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -274,7 +276,7 @@ void ImageWatcher<I>::notify_rename(const std::string &image_name,
 template <typename I>
 void ImageWatcher<I>::notify_update_features(uint64_t features, bool enabled,
                                              Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock &&
               !m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -285,7 +287,7 @@ template <typename I>
 void ImageWatcher<I>::notify_migrate(uint64_t request_id,
                                      ProgressContext &prog_ctx,
                                      Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock &&
               !m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -299,7 +301,7 @@ template <typename I>
 void ImageWatcher<I>::notify_sparsify(uint64_t request_id, size_t sparse_size,
                                       ProgressContext &prog_ctx,
                                       Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock &&
               !m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -336,7 +338,7 @@ void ImageWatcher<I>::schedule_cancel_async_requests() {
 
 template <typename I>
 void ImageWatcher<I>::cancel_async_requests() {
-  RWLock::WLocker l(m_async_request_lock);
+  std::unique_lock l{m_async_request_lock};
   for (std::map<AsyncRequestId, AsyncRequest>::iterator iter =
         m_async_requests.begin();
        iter != m_async_requests.end(); ++iter) {
@@ -347,7 +349,7 @@ void ImageWatcher<I>::cancel_async_requests() {
 
 template <typename I>
 void ImageWatcher<I>::set_owner_client_id(const ClientId& client_id) {
-  ceph_assert(m_owner_client_id_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_owner_client_id_lock));
   m_owner_client_id = client_id;
   ldout(m_image_ctx.cct, 10) << this << " current lock owner: "
                              << m_owner_client_id << dendl;
@@ -355,7 +357,7 @@ void ImageWatcher<I>::set_owner_client_id(const ClientId& client_id) {
 
 template <typename I>
 ClientId ImageWatcher<I>::get_client_id() {
-  RWLock::RLocker l(this->m_watch_lock);
+  std::shared_lock l{this->m_watch_lock};
   return ClientId(m_image_ctx.md_ctx.get_instance_id(), this->m_watch_handle);
 }
 
@@ -365,7 +367,7 @@ void ImageWatcher<I>::notify_acquired_lock() {
 
   ClientId client_id = get_client_id();
   {
-    Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
+    std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
     set_owner_client_id(client_id);
   }
 
@@ -377,7 +379,7 @@ void ImageWatcher<I>::notify_released_lock() {
   ldout(m_image_ctx.cct, 10) << this << " notify released lock" << dendl;
 
   {
-    Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
+    std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
     set_owner_client_id(ClientId());
   }
 
@@ -386,7 +388,7 @@ void ImageWatcher<I>::notify_released_lock() {
 
 template <typename I>
 void ImageWatcher<I>::schedule_request_lock(bool use_timer, int timer_delay) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
 
   if (m_image_ctx.exclusive_lock == nullptr) {
     // exclusive lock dynamically disabled via image refresh
@@ -395,7 +397,7 @@ void ImageWatcher<I>::schedule_request_lock(bool use_timer, int timer_delay) {
   ceph_assert(m_image_ctx.exclusive_lock &&
               !m_image_ctx.exclusive_lock->is_lock_owner());
 
-  RWLock::RLocker watch_locker(this->m_watch_lock);
+  std::shared_lock watch_locker{this->m_watch_lock};
   if (this->is_registered(this->m_watch_lock)) {
     ldout(m_image_ctx.cct, 15) << this << " requesting exclusive lock" << dendl;
 
@@ -415,8 +417,8 @@ void ImageWatcher<I>::schedule_request_lock(bool use_timer, int timer_delay) {
 
 template <typename I>
 void ImageWatcher<I>::notify_request_lock() {
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-  RWLock::RLocker image_locker(m_image_ctx.image_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
+  std::shared_lock image_locker{m_image_ctx.image_lock};
 
   // ExclusiveLock state machine can be dynamically disabled or
   // race with task cancel
@@ -434,8 +436,8 @@ void ImageWatcher<I>::notify_request_lock() {
 
 template <typename I>
 void ImageWatcher<I>::handle_request_lock(int r) {
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-  RWLock::RLocker image_locker(m_image_ctx.image_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
+  std::shared_lock image_locker{m_image_ctx.image_lock};
 
   // ExclusiveLock state machine cannot transition -- but can be
   // dynamically disabled
@@ -470,7 +472,7 @@ template <typename I>
 void ImageWatcher<I>::notify_lock_owner(const Payload& payload,
                                         Context *on_finish) {
   ceph_assert(on_finish != nullptr);
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
 
   bufferlist bl;
   encode(NotifyMessage(payload), bl);
@@ -482,7 +484,7 @@ void ImageWatcher<I>::notify_lock_owner(const Payload& payload,
 
 template <typename I>
 Context *ImageWatcher<I>::remove_async_request(const AsyncRequestId &id) {
-  RWLock::WLocker async_request_locker(m_async_request_lock);
+  std::unique_lock async_request_locker{m_async_request_lock};
   auto it = m_async_requests.find(id);
   if (it != m_async_requests.end()) {
     Context *on_complete = it->second.first;
@@ -523,7 +525,7 @@ void ImageWatcher<I>::notify_async_request(const AsyncRequestId &async_request_i
                                            ProgressContext& prog_ctx,
                                            Context *on_finish) {
   ceph_assert(on_finish != nullptr);
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
 
   ldout(m_image_ctx.cct, 10) << this << " async request: " << async_request_id
                              << dendl;
@@ -545,7 +547,7 @@ void ImageWatcher<I>::notify_async_request(const AsyncRequestId &async_request_i
     });
 
   {
-    RWLock::WLocker async_request_locker(m_async_request_lock);
+    std::unique_lock async_request_locker{m_async_request_lock};
     m_async_requests[async_request_id] = AsyncRequest(on_complete, &prog_ctx);
   }
 
@@ -560,7 +562,7 @@ int ImageWatcher<I>::prepare_async_request(const AsyncRequestId& async_request_i
   if (async_request_id.client_id == get_client_id()) {
     return -ERESTART;
   } else {
-    RWLock::WLocker l(m_async_request_lock);
+    std::unique_lock l{m_async_request_lock};
     if (m_async_pending.count(async_request_id) == 0) {
       m_async_pending.insert(async_request_id);
       *new_request = true;
@@ -595,14 +597,14 @@ bool ImageWatcher<I>::handle_payload(const AcquiredLockPayload &payload,
 
   bool cancel_async_requests = true;
   if (payload.client_id.is_valid()) {
-    Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
+    std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
     if (payload.client_id == m_owner_client_id) {
       cancel_async_requests = false;
     }
     set_owner_client_id(payload.client_id);
   }
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     // potentially wake up the exclusive lock state machine now that
     // a lock owner has advertised itself
@@ -623,7 +625,7 @@ bool ImageWatcher<I>::handle_payload(const ReleasedLockPayload &payload,
 
   bool cancel_async_requests = true;
   if (payload.client_id.is_valid()) {
-    Mutex::Locker l(m_owner_client_id_lock);
+    std::lock_guard l{m_owner_client_id_lock};
     if (payload.client_id != m_owner_client_id) {
       ldout(m_image_ctx.cct, 10) << this << " unexpected owner: "
                                  << payload.client_id << " != "
@@ -634,7 +636,7 @@ bool ImageWatcher<I>::handle_payload(const ReleasedLockPayload &payload,
     }
   }
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   if (cancel_async_requests &&
       (m_image_ctx.exclusive_lock == nullptr ||
        !m_image_ctx.exclusive_lock->is_lock_owner())) {
@@ -658,7 +660,7 @@ bool ImageWatcher<I>::handle_payload(const RequestLockPayload &payload,
     return true;
   }
 
-  RWLock::RLocker l(m_image_ctx.owner_lock);
+  std::shared_lock l{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr &&
       m_image_ctx.exclusive_lock->is_lock_owner()) {
     int r = 0;
@@ -666,7 +668,7 @@ bool ImageWatcher<I>::handle_payload(const RequestLockPayload &payload,
 
     if (accept_request) {
       ceph_assert(r == 0);
-      Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
+      std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
       if (!m_owner_client_id.is_valid()) {
         return true;
       }
@@ -684,7 +686,7 @@ bool ImageWatcher<I>::handle_payload(const RequestLockPayload &payload,
 template <typename I>
 bool ImageWatcher<I>::handle_payload(const AsyncProgressPayload &payload,
                                      C_NotifyAck *ack_ctx) {
-  RWLock::RLocker l(m_async_request_lock);
+  std::shared_lock l{m_async_request_lock};
   std::map<AsyncRequestId, AsyncRequest>::iterator req_it =
     m_async_requests.find(payload.async_request_id);
   if (req_it != m_async_requests.end()) {
@@ -715,7 +717,7 @@ template <typename I>
 bool ImageWatcher<I>::handle_payload(const FlattenPayload &payload,
                                     C_NotifyAck *ack_ctx) {
 
-  RWLock::RLocker l(m_image_ctx.owner_lock);
+  std::shared_lock l{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     int r;
     if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
@@ -741,7 +743,7 @@ bool ImageWatcher<I>::handle_payload(const FlattenPayload &payload,
 template <typename I>
 bool ImageWatcher<I>::handle_payload(const ResizePayload &payload,
                                     C_NotifyAck *ack_ctx) {
-  RWLock::RLocker l(m_image_ctx.owner_lock);
+  std::shared_lock l{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     int r;
     if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
@@ -769,7 +771,7 @@ bool ImageWatcher<I>::handle_payload(const ResizePayload &payload,
 template <typename I>
 bool ImageWatcher<I>::handle_payload(const SnapCreatePayload &payload,
                                     C_NotifyAck *ack_ctx) {
-  RWLock::RLocker l(m_image_ctx.owner_lock);
+  std::shared_lock l{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     int r;
     if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
@@ -791,7 +793,7 @@ bool ImageWatcher<I>::handle_payload(const SnapCreatePayload &payload,
 template <typename I>
 bool ImageWatcher<I>::handle_payload(const SnapRenamePayload &payload,
                                     C_NotifyAck *ack_ctx) {
-  RWLock::RLocker l(m_image_ctx.owner_lock);
+  std::shared_lock l{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     int r;
     if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
@@ -813,7 +815,7 @@ bool ImageWatcher<I>::handle_payload(const SnapRenamePayload &payload,
 template <typename I>
 bool ImageWatcher<I>::handle_payload(const SnapRemovePayload &payload,
                                     C_NotifyAck *ack_ctx) {
-  RWLock::RLocker l(m_image_ctx.owner_lock);
+  std::shared_lock l{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     int r;
     if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
@@ -834,7 +836,7 @@ bool ImageWatcher<I>::handle_payload(const SnapRemovePayload &payload,
 template <typename I>
 bool ImageWatcher<I>::handle_payload(const SnapProtectPayload& payload,
                                      C_NotifyAck *ack_ctx) {
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     int r;
     if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
@@ -855,7 +857,7 @@ bool ImageWatcher<I>::handle_payload(const SnapProtectPayload& payload,
 template <typename I>
 bool ImageWatcher<I>::handle_payload(const SnapUnprotectPayload& payload,
                                      C_NotifyAck *ack_ctx) {
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     int r;
     if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
@@ -876,7 +878,7 @@ bool ImageWatcher<I>::handle_payload(const SnapUnprotectPayload& payload,
 template <typename I>
 bool ImageWatcher<I>::handle_payload(const RebuildObjectMapPayload& payload,
                                      C_NotifyAck *ack_ctx) {
-  RWLock::RLocker l(m_image_ctx.owner_lock);
+  std::shared_lock l{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     int r;
     if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
@@ -903,7 +905,7 @@ bool ImageWatcher<I>::handle_payload(const RebuildObjectMapPayload& payload,
 template <typename I>
 bool ImageWatcher<I>::handle_payload(const RenamePayload& payload,
                                      C_NotifyAck *ack_ctx) {
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     int r;
     if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
@@ -923,7 +925,7 @@ bool ImageWatcher<I>::handle_payload(const RenamePayload& payload,
 template <typename I>
 bool ImageWatcher<I>::handle_payload(const UpdateFeaturesPayload& payload,
                                      C_NotifyAck *ack_ctx) {
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     int r;
     if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
@@ -946,7 +948,7 @@ template <typename I>
 bool ImageWatcher<I>::handle_payload(const MigratePayload &payload,
                                     C_NotifyAck *ack_ctx) {
 
-  RWLock::RLocker l(m_image_ctx.owner_lock);
+  std::shared_lock l{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     int r;
     if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
@@ -972,7 +974,7 @@ bool ImageWatcher<I>::handle_payload(const MigratePayload &payload,
 template <typename I>
 bool ImageWatcher<I>::handle_payload(const SparsifyPayload &payload,
                                     C_NotifyAck *ack_ctx) {
-  RWLock::RLocker l(m_image_ctx.owner_lock);
+  std::shared_lock l{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     int r;
     if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
@@ -999,7 +1001,7 @@ bool ImageWatcher<I>::handle_payload(const SparsifyPayload &payload,
 template <typename I>
 bool ImageWatcher<I>::handle_payload(const UnknownPayload &payload,
                                     C_NotifyAck *ack_ctx) {
-  RWLock::RLocker l(m_image_ctx.owner_lock);
+  std::shared_lock l{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock != nullptr) {
     int r;
     if (m_image_ctx.exclusive_lock->accept_requests(&r) || r < 0) {
@@ -1050,7 +1052,7 @@ void ImageWatcher<I>::handle_error(uint64_t handle, int err) {
                          << cpp_strerror(err) << dendl;
 
   {
-    Mutex::Locker l(m_owner_client_id_lock);
+    std::lock_guard l{m_owner_client_id_lock};
     set_owner_client_id(ClientId());
   }
 
@@ -1063,7 +1065,7 @@ void ImageWatcher<I>::handle_rewatch_complete(int r) {
   ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
 
   {
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+    std::shared_lock owner_locker{m_image_ctx.owner_lock};
     if (m_image_ctx.exclusive_lock != nullptr) {
       // update the lock cookie with the new watch handle
       m_image_ctx.exclusive_lock->reacquire_lock(nullptr);
index 441a7d696ad3e1b61212f2cc84b4721416f92cf8..1ae54d80bfd4b5fd099762323f6bc97c93467030 100644 (file)
@@ -5,8 +5,7 @@
 #define CEPH_LIBRBD_IMAGE_WATCHER_H
 
 #include "cls/rbd/cls_rbd_types.h"
-#include "common/Mutex.h"
-#include "common/RWLock.h"
+#include "common/ceph_mutex.h"
 #include "include/Context.h"
 #include "include/rbd/librbd.hpp"
 #include "librbd/Watcher.h"
@@ -165,11 +164,11 @@ private:
 
   TaskFinisher<Task> *m_task_finisher;
 
-  RWLock m_async_request_lock;
+  ceph::shared_mutex m_async_request_lock;
   std::map<watch_notify::AsyncRequestId, AsyncRequest> m_async_requests;
   std::set<watch_notify::AsyncRequestId> m_async_pending;
 
-  Mutex m_owner_client_id_lock;
+  ceph::mutex m_owner_client_id_lock;
   watch_notify::ClientId m_owner_client_id;
 
   void handle_register_watch(int r);
index 66abdf9020733d08c65ead97c1e4c45dcc19ac88..b73a240e2316a9beba80209152e36d96687d41c0 100644 (file)
@@ -134,13 +134,13 @@ struct GetTagsRequest {
   journal::TagData *tag_data;
   Context *on_finish;
 
-  Mutex lock;
+  ceph::mutex lock = ceph::make_mutex("lock");
 
   GetTagsRequest(CephContext *cct, J *journaler, cls::journal::Client *client,
                  journal::ImageClientMeta *client_meta, uint64_t *tag_tid,
                  journal::TagData *tag_data, Context *on_finish)
     : cct(cct), journaler(journaler), client(client), client_meta(client_meta),
-      tag_tid(tag_tid), tag_data(tag_data), on_finish(on_finish), lock("lock") {
+      tag_tid(tag_tid), tag_data(tag_data), on_finish(on_finish) {
   }
 
   /**
@@ -327,9 +327,9 @@ std::ostream &operator<<(std::ostream &os,
 template <typename I>
 Journal<I>::Journal(I &image_ctx)
   : m_image_ctx(image_ctx), m_journaler(NULL),
-    m_lock("Journal<I>::m_lock"), m_state(STATE_UNINITIALIZED),
+    m_state(STATE_UNINITIALIZED),
     m_error_result(0), m_replay_handler(this), m_close_pending(false),
-    m_event_lock("Journal<I>::m_event_lock"), m_event_tid(0),
+    m_event_tid(0),
     m_blocking_writes(false), m_journal_replay(NULL),
     m_metadata_listener(this) {
 
@@ -360,7 +360,7 @@ Journal<I>::~Journal() {
 
 template <typename I>
 bool Journal<I>::is_journal_supported(I &image_ctx) {
-  ceph_assert(image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
   return ((image_ctx.features & RBD_FEATURE_JOURNALING) &&
           !image_ctx.read_only && image_ctx.snap_id == CEPH_NOSNAP);
 }
@@ -462,7 +462,7 @@ int Journal<I>::request_resync(I *image_ctx) {
   Journaler journaler(image_ctx->md_ctx, image_ctx->id, IMAGE_CLIENT_ID, {},
                       nullptr);
 
-  Mutex lock("lock");
+  ceph::mutex lock = ceph::make_mutex("lock");
   journal::ImageClientMeta client_meta;
   uint64_t tag_tid;
   journal::TagData tag_data;
@@ -521,19 +521,19 @@ void Journal<I>::demote(I *image_ctx, Context *on_finish) {
 
 template <typename I>
 bool Journal<I>::is_journal_ready() const {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   return (m_state == STATE_READY);
 }
 
 template <typename I>
 bool Journal<I>::is_journal_replaying() const {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   return is_journal_replaying(m_lock);
 }
 
 template <typename I>
-bool Journal<I>::is_journal_replaying(const Mutex &) const {
-  ceph_assert(m_lock.is_locked());
+bool Journal<I>::is_journal_replaying(const ceph::mutex &) const {
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   return (m_state == STATE_REPLAYING ||
           m_state == STATE_FLUSHING_REPLAY ||
           m_state == STATE_FLUSHING_RESTART ||
@@ -542,8 +542,8 @@ bool Journal<I>::is_journal_replaying(const Mutex &) const {
 
 template <typename I>
 bool Journal<I>::is_journal_appending() const {
-  ceph_assert(m_image_ctx.image_lock.is_locked());
-  Mutex::Locker locker(m_lock);
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
+  std::lock_guard locker{m_lock};
   return (m_state == STATE_READY &&
           !m_image_ctx.get_journal_policy()->append_disabled());
 }
@@ -552,7 +552,7 @@ template <typename I>
 void Journal<I>::wait_for_journal_ready(Context *on_ready) {
   on_ready = create_async_context_callback(m_image_ctx, on_ready);
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   if (m_state == STATE_READY) {
     on_ready->complete(m_error_result);
   } else {
@@ -571,7 +571,7 @@ void Journal<I>::open(Context *on_finish) {
   m_image_ctx.io_object_dispatcher->register_object_dispatch(
     journal::ObjectDispatch<I>::create(&m_image_ctx, this));
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(m_state == STATE_UNINITIALIZED);
   wait_for_steady_state(on_finish);
   create_journaler();
@@ -592,21 +592,19 @@ void Journal<I>::close(Context *on_finish) {
     });
   on_finish = create_async_context_callback(m_image_ctx, on_finish);
 
-  Mutex::Locker locker(m_lock);
-  while (m_listener_notify) {
-    m_listener_cond.Wait(m_lock);
-  }
+  std::unique_lock locker{m_lock};
+  m_listener_cond.wait(locker, [this] { return !m_listener_notify; });
 
   Listeners listeners(m_listeners);
   m_listener_notify = true;
-  m_lock.Unlock();
+  m_lock.unlock();
   for (auto listener : listeners) {
     listener->handle_close();
   }
 
-  m_lock.Lock();
+  m_lock.lock();
   m_listener_notify = false;
-  m_listener_cond.Signal();
+  m_listener_cond.notify_all();
 
   ceph_assert(m_state != STATE_UNINITIALIZED);
   if (m_state == STATE_CLOSED) {
@@ -624,25 +622,25 @@ void Journal<I>::close(Context *on_finish) {
 
 template <typename I>
 bool Journal<I>::is_tag_owner() const {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   return is_tag_owner(m_lock);
 }
 
 template <typename I>
-bool Journal<I>::is_tag_owner(const Mutex &) const {
-  ceph_assert(m_lock.is_locked());
+bool Journal<I>::is_tag_owner(const ceph::mutex &) const {
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   return (m_tag_data.mirror_uuid == LOCAL_MIRROR_UUID);
 }
 
 template <typename I>
 uint64_t Journal<I>::get_tag_tid() const {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   return m_tag_tid;
 }
 
 template <typename I>
 journal::TagData Journal<I>::get_tag_data() const {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   return m_tag_data;
 }
 
@@ -654,7 +652,7 @@ void Journal<I>::allocate_local_tag(Context *on_finish) {
   journal::TagPredecessor predecessor;
   predecessor.mirror_uuid = LOCAL_MIRROR_UUID;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ceph_assert(m_journaler != nullptr && is_tag_owner(m_lock));
 
     cls::journal::Client client;
@@ -688,7 +686,7 @@ void Journal<I>::allocate_tag(const std::string &mirror_uuid,
   ldout(cct, 20) << this << " " << __func__ << ":  mirror_uuid=" << mirror_uuid
                  << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(m_journaler != nullptr);
 
   journal::TagData tag_data;
@@ -709,7 +707,7 @@ void Journal<I>::flush_commit_position(Context *on_finish) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(m_journaler != nullptr);
   m_journaler->flush_commit_position(on_finish);
 }
@@ -718,7 +716,7 @@ template <typename I>
 void Journal<I>::user_flushed() {
   if (m_state == STATE_READY && !m_user_flushed.exchange(true) &&
       m_image_ctx.config.template get_val<bool>("rbd_journal_object_writethrough_until_flush")) {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     if (m_state == STATE_READY) {
       CephContext *cct = m_image_ctx.cct;
       ldout(cct, 5) << this << " " << __func__ << dendl;
@@ -787,7 +785,7 @@ uint64_t Journal<I>::append_io_events(journal::EventType event_type,
 
   uint64_t tid;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ceph_assert(m_state == STATE_READY);
 
     tid = ++m_event_tid;
@@ -801,7 +799,7 @@ uint64_t Journal<I>::append_io_events(journal::EventType event_type,
   }
 
   {
-    Mutex::Locker event_locker(m_event_lock);
+    std::lock_guard event_locker{m_event_lock};
     m_events[tid] = Event(futures, offset, length, filter_ret_val);
   }
 
@@ -829,7 +827,7 @@ void Journal<I>::commit_io_event(uint64_t tid, int r) {
   ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", "
                  "r=" << r << dendl;
 
-  Mutex::Locker event_locker(m_event_lock);
+  std::lock_guard event_locker{m_event_lock};
   typename Events::iterator it = m_events.find(tid);
   if (it == m_events.end()) {
     return;
@@ -848,7 +846,7 @@ void Journal<I>::commit_io_event_extent(uint64_t tid, uint64_t offset,
                  << "length=" << length << ", "
                  << "r=" << r << dendl;
 
-  Mutex::Locker event_locker(m_event_lock);
+  std::lock_guard event_locker{m_event_lock};
   typename Events::iterator it = m_events.find(tid);
   if (it == m_events.end()) {
     return;
@@ -878,7 +876,7 @@ template <typename I>
 void Journal<I>::append_op_event(uint64_t op_tid,
                                  journal::EventEntry &&event_entry,
                                  Context *on_safe) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
 
   bufferlist bl;
   event_entry.timestamp = ceph_clock_now();
@@ -886,7 +884,7 @@ void Journal<I>::append_op_event(uint64_t op_tid,
 
   Future future;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ceph_assert(m_state == STATE_READY);
 
     future = m_journaler->append(m_tag_tid, bl);
@@ -924,7 +922,7 @@ void Journal<I>::commit_op_event(uint64_t op_tid, int r, Context *on_safe) {
   Future op_start_future;
   Future op_finish_future;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ceph_assert(m_state == STATE_READY);
 
     // ready to commit op event
@@ -947,7 +945,7 @@ void Journal<I>::replay_op_ready(uint64_t op_tid, Context *on_resume) {
   ldout(cct, 10) << this << " " << __func__ << ": op_tid=" << op_tid << dendl;
 
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ceph_assert(m_journal_replay != nullptr);
     m_journal_replay->replay_op_ready(op_tid, on_resume);
   }
@@ -961,7 +959,7 @@ void Journal<I>::flush_event(uint64_t tid, Context *on_safe) {
 
   Future future;
   {
-    Mutex::Locker event_locker(m_event_lock);
+    std::lock_guard event_locker{m_event_lock};
     future = wait_event(m_lock, tid, on_safe);
   }
 
@@ -976,14 +974,14 @@ void Journal<I>::wait_event(uint64_t tid, Context *on_safe) {
   ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", "
                  << "on_safe=" << on_safe << dendl;
 
-  Mutex::Locker event_locker(m_event_lock);
+  std::lock_guard event_locker{m_event_lock};
   wait_event(m_lock, tid, on_safe);
 }
 
 template <typename I>
-typename Journal<I>::Future Journal<I>::wait_event(Mutex &lock, uint64_t tid,
+typename Journal<I>::Future Journal<I>::wait_event(ceph::mutex &lock, uint64_t tid,
                                                    Context *on_safe) {
-  ceph_assert(m_event_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_event_lock));
   CephContext *cct = m_image_ctx.cct;
 
   typename Events::iterator it = m_events.find(tid);
@@ -1009,7 +1007,7 @@ void Journal<I>::start_external_replay(journal::Replay<I> **journal_replay,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(m_state == STATE_READY);
   ceph_assert(m_journal_replay == nullptr);
 
@@ -1031,7 +1029,7 @@ void Journal<I>::handle_start_external_replay(int r,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(m_state == STATE_READY);
   ceph_assert(m_journal_replay == nullptr);
 
@@ -1057,7 +1055,7 @@ void Journal<I>::stop_external_replay() {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(m_journal_replay != nullptr);
   ceph_assert(m_state == STATE_REPLAYING);
 
@@ -1077,7 +1075,7 @@ void Journal<I>::create_journaler() {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << dendl;
 
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_RESTARTING_REPLAY);
   ceph_assert(m_journaler == NULL);
 
@@ -1112,7 +1110,7 @@ void Journal<I>::destroy_journaler(int r) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
 
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   delete m_journal_replay;
   m_journal_replay = NULL;
@@ -1126,7 +1124,7 @@ void Journal<I>::destroy_journaler(int r) {
       Journal<I>, &Journal<I>::handle_journal_destroyed>(this));
   ctx = new FunctionContext(
     [this, ctx](int r) {
-      Mutex::Locker locker(m_lock);
+      std::lock_guard locker{m_lock};
       m_journaler->shut_down(ctx);
     });
   m_async_journal_op_tracker.wait(m_image_ctx, ctx);
@@ -1137,7 +1135,7 @@ void Journal<I>::recreate_journaler(int r) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
 
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   ceph_assert(m_state == STATE_FLUSHING_RESTART ||
               m_state == STATE_FLUSHING_REPLAY);
 
@@ -1154,7 +1152,7 @@ void Journal<I>::recreate_journaler(int r) {
 
 template <typename I>
 void Journal<I>::complete_event(typename Events::iterator it, int r) {
-  ceph_assert(m_event_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_event_lock));
   ceph_assert(m_state == STATE_READY);
 
   CephContext *cct = m_image_ctx.cct;
@@ -1188,7 +1186,7 @@ void Journal<I>::complete_event(typename Events::iterator it, int r) {
 
 template <typename I>
 void Journal<I>::start_append() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   m_journaler->start_append(
     m_image_ctx.config.template get_val<uint64_t>("rbd_journal_object_max_in_flight_appends"));
@@ -1207,7 +1205,7 @@ void Journal<I>::handle_open(int r) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(m_state == STATE_INITIALIZING);
 
   if (r < 0) {
@@ -1234,7 +1232,7 @@ void Journal<I>::handle_replay_ready() {
   CephContext *cct = m_image_ctx.cct;
   ReplayEntry replay_entry;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     if (m_state != STATE_REPLAYING) {
       return;
     }
@@ -1273,7 +1271,7 @@ void Journal<I>::handle_replay_complete(int r) {
 
   bool cancel_ops = false;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     if (m_state != STATE_REPLAYING) {
       return;
     }
@@ -1294,7 +1292,7 @@ void Journal<I>::handle_replay_complete(int r) {
 
       State state;
       {
-        Mutex::Locker locker(m_lock);
+       std::lock_guard locker{m_lock};
         ceph_assert(m_state == STATE_FLUSHING_RESTART ||
                     m_state == STATE_FLUSHING_REPLAY);
         state = m_state;
@@ -1326,7 +1324,7 @@ void Journal<I>::handle_replay_process_ready(int r) {
 
   ceph_assert(r == 0);
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ceph_assert(m_processing_entry);
     m_processing_entry = false;
   }
@@ -1337,7 +1335,7 @@ template <typename I>
 void Journal<I>::handle_replay_process_safe(ReplayEntry replay_entry, int r) {
   CephContext *cct = m_image_ctx.cct;
 
-  m_lock.Lock();
+  std::unique_lock locker{m_lock};
   ceph_assert(m_state == STATE_REPLAYING ||
               m_state == STATE_FLUSHING_RESTART ||
               m_state == STATE_FLUSHING_REPLAY);
@@ -1353,7 +1351,7 @@ void Journal<I>::handle_replay_process_safe(ReplayEntry replay_entry, int r) {
     if (m_state == STATE_REPLAYING) {
       // abort the replay if we have an error
       transition_state(STATE_FLUSHING_RESTART, r);
-      m_lock.Unlock();
+      locker.unlock();
 
       // stop replay, shut down, and restart
       Context* ctx = create_context_callback<
@@ -1366,7 +1364,7 @@ void Journal<I>::handle_replay_process_safe(ReplayEntry replay_entry, int r) {
           ldout(cct, 20) << this << " handle_replay_process_safe: "
                          << "shut down replay" << dendl;
           {
-            Mutex::Locker locker(m_lock);
+           std::lock_guard locker{m_lock};
             ceph_assert(m_state == STATE_FLUSHING_RESTART);
           }
 
@@ -1377,19 +1375,17 @@ void Journal<I>::handle_replay_process_safe(ReplayEntry replay_entry, int r) {
     } else if (m_state == STATE_FLUSHING_REPLAY) {
       // end-of-replay flush in-progress -- we need to restart replay
       transition_state(STATE_FLUSHING_RESTART, r);
-      m_lock.Unlock();
       return;
     }
   } else {
     // only commit the entry if written successfully
     m_journaler->committed(replay_entry);
   }
-  m_lock.Unlock();
 }
 
 template <typename I>
 void Journal<I>::handle_flushing_restart(int r) {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << dendl;
@@ -1406,7 +1402,7 @@ void Journal<I>::handle_flushing_restart(int r) {
 
 template <typename I>
 void Journal<I>::handle_flushing_replay() {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << dendl;
@@ -1434,7 +1430,7 @@ void Journal<I>::handle_recording_stopped(int r) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(m_state == STATE_STOPPING);
 
   destroy_journaler(r);
@@ -1451,7 +1447,7 @@ void Journal<I>::handle_journal_destroyed(int r) {
                << dendl;
   }
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   delete m_journaler;
   m_journaler = nullptr;
 
@@ -1479,7 +1475,7 @@ void Journal<I>::handle_io_event_safe(int r, uint64_t tid) {
 
   Contexts on_safe_contexts;
   {
-    Mutex::Locker event_locker(m_event_lock);
+    std::lock_guard event_locker{m_event_lock};
     typename Events::iterator it = m_events.find(tid);
     ceph_assert(it != m_events.end());
 
@@ -1537,7 +1533,7 @@ void Journal<I>::handle_op_event_safe(int r, uint64_t tid,
 
 template <typename I>
 void Journal<I>::stop_recording() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   ceph_assert(m_journaler != NULL);
 
   ceph_assert(m_state == STATE_READY);
@@ -1552,7 +1548,7 @@ template <typename I>
 void Journal<I>::transition_state(State state, int r) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << ": new state=" << state << dendl;
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   m_state = state;
 
   if (m_error_result == 0 && r < 0) {
@@ -1569,7 +1565,7 @@ void Journal<I>::transition_state(State state, int r) {
 
 template <typename I>
 bool Journal<I>::is_steady_state() const {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   switch (m_state) {
   case STATE_READY:
   case STATE_CLOSED:
@@ -1589,7 +1585,7 @@ bool Journal<I>::is_steady_state() const {
 
 template <typename I>
 void Journal<I>::wait_for_steady_state(Context *on_state) {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   ceph_assert(!is_steady_state());
 
   CephContext *cct = m_image_ctx.cct;
@@ -1600,7 +1596,7 @@ void Journal<I>::wait_for_steady_state(Context *on_state) {
 
 template <typename I>
 int Journal<I>::is_resync_requested(bool *do_resync) {
-  Mutex::Locker l(m_lock);
+  std::lock_guard l{m_lock};
   return check_resync_requested(do_resync);
 }
 
@@ -1609,7 +1605,7 @@ int Journal<I>::check_resync_requested(bool *do_resync) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << dendl;
 
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   ceph_assert(do_resync != nullptr);
 
   cls::journal::Client client;
@@ -1647,13 +1643,13 @@ struct C_RefreshTags : public Context {
   util::AsyncOpTracker &async_op_tracker;
   Context *on_finish = nullptr;
 
-  Mutex lock;
+  ceph::mutex lock =
+    ceph::make_mutex("librbd::Journal::C_RefreshTags::lock");
   uint64_t tag_tid = 0;
   journal::TagData tag_data;
 
   explicit C_RefreshTags(util::AsyncOpTracker &async_op_tracker)
-    : async_op_tracker(async_op_tracker),
-      lock("librbd::Journal::C_RefreshTags::lock") {
+    : async_op_tracker(async_op_tracker) {
     async_op_tracker.start_op();
   }
   ~C_RefreshTags() override {
@@ -1668,7 +1664,7 @@ struct C_RefreshTags : public Context {
 template <typename I>
 void Journal<I>::handle_metadata_updated() {
   CephContext *cct = m_image_ctx.cct;
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
 
   if (m_state != STATE_READY && !is_journal_replaying(m_lock)) {
     return;
@@ -1704,7 +1700,7 @@ void Journal<I>::handle_refresh_metadata(uint64_t refresh_sequence,
                                          uint64_t tag_tid,
                                          journal::TagData tag_data, int r) {
   CephContext *cct = m_image_ctx.cct;
-  Mutex::Locker locker(m_lock);
+  std::unique_lock locker{m_lock};
 
   if (r < 0) {
     lderr(cct) << this << " " << __func__ << ": failed to refresh metadata: "
@@ -1721,9 +1717,7 @@ void Journal<I>::handle_refresh_metadata(uint64_t refresh_sequence,
                  << "refresh_sequence=" << refresh_sequence << ", "
                  << "tag_tid=" << tag_tid << ", "
                  << "tag_data=" << tag_data << dendl;
-  while (m_listener_notify) {
-    m_listener_cond.Wait(m_lock);
-  }
+  m_listener_cond.wait(locker, [this] { return !m_listener_notify; });
 
   bool was_tag_owner = is_tag_owner(m_lock);
   if (m_tag_tid < tag_tid) {
@@ -1742,7 +1736,7 @@ void Journal<I>::handle_refresh_metadata(uint64_t refresh_sequence,
 
   Listeners listeners(m_listeners);
   m_listener_notify = true;
-  m_lock.Unlock();
+  m_lock.unlock();
 
   if (promoted_to_primary) {
     for (auto listener : listeners) {
@@ -1754,23 +1748,21 @@ void Journal<I>::handle_refresh_metadata(uint64_t refresh_sequence,
     }
   }
 
-  m_lock.Lock();
+  m_lock.lock();
   m_listener_notify = false;
-  m_listener_cond.Signal();
+  m_listener_cond.notify_all();
 }
 
 template <typename I>
 void Journal<I>::add_listener(journal::Listener *listener) {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   m_listeners.insert(listener);
 }
 
 template <typename I>
 void Journal<I>::remove_listener(journal::Listener *listener) {
-  Mutex::Locker locker(m_lock);
-  while (m_listener_notify) {
-    m_listener_cond.Wait(m_lock);
-  }
+  std::unique_lock locker{m_lock};
+  m_listener_cond.wait(locker, [this] { return !m_listener_notify; });
   m_listeners.erase(listener);
 }
 
index e63cc4a71377e6bba023e6c4a3d88e4bec08af50..13be62aa1635dc925505bacf2144495029d11694 100644 (file)
@@ -9,8 +9,6 @@
 #include "include/interval_set.h"
 #include "include/rados/librados_fwd.hpp"
 #include "common/Cond.h"
-#include "common/Mutex.h"
-#include "common/Cond.h"
 #include "common/WorkQueue.h"
 #include "journal/Future.h"
 #include "journal/JournalMetadataListener.h"
@@ -272,10 +270,10 @@ private:
 
   ContextWQ *m_work_queue = nullptr;
   SafeTimer *m_timer = nullptr;
-  Mutex *m_timer_lock = nullptr;
+  ceph::mutex *m_timer_lock = nullptr;
 
   Journaler *m_journaler;
-  mutable Mutex m_lock;
+  mutable ceph::mutex m_lock = ceph::make_mutex("Journal<I>::m_lock");
   State m_state;
   uint64_t m_max_append_size = 0;
   uint64_t m_tag_class = 0;
@@ -289,7 +287,7 @@ private:
   ReplayHandler m_replay_handler;
   bool m_close_pending;
 
-  Mutex m_event_lock;
+  ceph::mutex m_event_lock = ceph::make_mutex("Journal<I>::m_event_lock");
   uint64_t m_event_tid;
   Events m_events;
 
@@ -320,19 +318,19 @@ private:
 
   typedef std::set<journal::Listener *> Listeners;
   Listeners m_listeners;
-  Cond m_listener_cond;
+  ceph::condition_variable m_listener_cond;
   bool m_listener_notify = false;
 
   uint64_t m_refresh_sequence = 0;
 
-  bool is_journal_replaying(const Mutex &) const;
-  bool is_tag_owner(const Mutex &) const;
+  bool is_journal_replaying(const ceph::mutex &) const;
+  bool is_tag_owner(const ceph::mutex &) const;
 
   uint64_t append_io_events(journal::EventType event_type,
                             const Bufferlists &bufferlists,
                             uint64_t offset, size_t length, bool flush_entry,
                             int filter_ret_val);
-  Future wait_event(Mutex &lock, uint64_t tid, Context *on_safe);
+  Future wait_event(ceph::mutex &lock, uint64_t tid, Context *on_safe);
 
   void create_journaler();
   void destroy_journaler(int r);
index 15d2016f26a3216c0af880956f23853d970ed77d..28867a98733e3c800ca5b991684e20f8031f30bb 100644 (file)
@@ -67,7 +67,7 @@ ManagedLock<I>::ManagedLock(librados::IoCtx &ioctx, ContextWQ *work_queue,
                             const string& oid, Watcher *watcher, Mode mode,
                             bool blacklist_on_break_lock,
                             uint32_t blacklist_expire_seconds)
-  : m_lock(unique_lock_name("librbd::ManagedLock<I>::m_lock", this)),
+  : m_lock(ceph::make_mutex(unique_lock_name("librbd::ManagedLock<I>::m_lock", this))),
     m_ioctx(ioctx), m_cct(reinterpret_cast<CephContext *>(ioctx.cct())),
     m_work_queue(work_queue),
     m_oid(oid),
@@ -80,7 +80,7 @@ ManagedLock<I>::ManagedLock(librados::IoCtx &ioctx, ContextWQ *work_queue,
 
 template <typename I>
 ManagedLock<I>::~ManagedLock() {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED ||
               m_state == STATE_UNINITIALIZED);
   if (m_state == STATE_UNINITIALIZED) {
@@ -95,15 +95,15 @@ ManagedLock<I>::~ManagedLock() {
 
 template <typename I>
 bool ManagedLock<I>::is_lock_owner() const {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
 
   return is_lock_owner(m_lock);
 }
 
 template <typename I>
-bool ManagedLock<I>::is_lock_owner(Mutex &lock) const {
+bool ManagedLock<I>::is_lock_owner(ceph::mutex &lock) const {
 
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   bool lock_owner;
 
@@ -128,7 +128,7 @@ template <typename I>
 void ManagedLock<I>::shut_down(Context *on_shut_down) {
   ldout(m_cct, 10) << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(!is_state_shutdown());
 
   if (m_state == STATE_WAITING_FOR_REGISTER) {
@@ -147,7 +147,7 @@ template <typename I>
 void ManagedLock<I>::acquire_lock(Context *on_acquired) {
   int r = 0;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     if (is_state_shutdown()) {
       r = -ESHUTDOWN;
     } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
@@ -166,7 +166,7 @@ template <typename I>
 void ManagedLock<I>::try_acquire_lock(Context *on_acquired) {
   int r = 0;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     if (is_state_shutdown()) {
       r = -ESHUTDOWN;
     } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
@@ -185,7 +185,7 @@ template <typename I>
 void ManagedLock<I>::release_lock(Context *on_released) {
   int r = 0;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     if (is_state_shutdown()) {
       r = -ESHUTDOWN;
     } else if (m_state != STATE_UNLOCKED || !m_actions_contexts.empty()) {
@@ -203,7 +203,7 @@ void ManagedLock<I>::release_lock(Context *on_released) {
 template <typename I>
 void ManagedLock<I>::reacquire_lock(Context *on_reacquired) {
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
 
     if (m_state == STATE_WAITING_FOR_REGISTER) {
       // restart the acquire lock process now that watch is valid
@@ -237,7 +237,7 @@ void ManagedLock<I>::get_locker(managed_lock::Locker *locker,
 
   int r;
   {
-    Mutex::Locker l(m_lock);
+    std::lock_guard l{m_lock};
     if (is_state_shutdown()) {
       r = -ESHUTDOWN;
     } else {
@@ -259,7 +259,7 @@ void ManagedLock<I>::break_lock(const managed_lock::Locker &locker,
 
   int r;
   {
-    Mutex::Locker l(m_lock);
+    std::lock_guard l{m_lock};
     if (is_state_shutdown()) {
       r = -ESHUTDOWN;
     } else if (is_lock_owner(m_lock)) {
@@ -284,7 +284,7 @@ int ManagedLock<I>::assert_header_locked() {
 
   librados::ObjectReadOperation op;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME,
                                     (m_mode == EXCLUSIVE ? LOCK_EXCLUSIVE :
                                                            LOCK_SHARED),
@@ -371,7 +371,7 @@ bool ManagedLock<I>::is_transition_state() const {
 
 template <typename I>
 void ManagedLock<I>::append_context(Action action, Context *ctx) {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   for (auto &action_ctxs : m_actions_contexts) {
     if (action == action_ctxs.first) {
@@ -391,7 +391,7 @@ void ManagedLock<I>::append_context(Action action, Context *ctx) {
 
 template <typename I>
 void ManagedLock<I>::execute_action(Action action, Context *ctx) {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   append_context(action, ctx);
   if (!is_transition_state()) {
@@ -401,7 +401,7 @@ void ManagedLock<I>::execute_action(Action action, Context *ctx) {
 
 template <typename I>
 void ManagedLock<I>::execute_next_action() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   ceph_assert(!m_actions_contexts.empty());
   switch (get_active_action()) {
   case ACTION_ACQUIRE_LOCK:
@@ -425,25 +425,25 @@ void ManagedLock<I>::execute_next_action() {
 
 template <typename I>
 typename ManagedLock<I>::Action ManagedLock<I>::get_active_action() const {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   ceph_assert(!m_actions_contexts.empty());
   return m_actions_contexts.front().first;
 }
 
 template <typename I>
 void ManagedLock<I>::complete_active_action(State next_state, int r) {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   ceph_assert(!m_actions_contexts.empty());
 
   ActionContexts action_contexts(std::move(m_actions_contexts.front()));
   m_actions_contexts.pop_front();
   m_state = next_state;
 
-  m_lock.Unlock();
+  m_lock.unlock();
   for (auto ctx : action_contexts.second) {
     ctx->complete(r);
   }
-  m_lock.Lock();
+  m_lock.lock();
 
   if (!is_transition_state() && !m_actions_contexts.empty()) {
     execute_next_action();
@@ -452,7 +452,7 @@ void ManagedLock<I>::complete_active_action(State next_state, int r) {
 
 template <typename I>
 bool ManagedLock<I>::is_state_shutdown() const {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   switch (m_state) {
   case STATE_PRE_SHUTTING_DOWN:
@@ -469,7 +469,7 @@ bool ManagedLock<I>::is_state_shutdown() const {
 
 template <typename I>
 void ManagedLock<I>::send_acquire_lock() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   if (m_state == STATE_LOCKED) {
     complete_active_action(STATE_LOCKED, 0);
     return;
@@ -541,7 +541,7 @@ template <typename I>
 void ManagedLock<I>::handle_post_acquire_lock(int r) {
   ldout(m_cct, 10) << "r=" << r << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
 
   if (r < 0 && m_post_next_state == STATE_LOCKED) {
     // release_lock without calling pre and post handlers
@@ -560,7 +560,7 @@ void ManagedLock<I>::revert_to_unlock_state(int r) {
   ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
       m_work_queue, m_oid, m_cookie,
       new FunctionContext([this, r](int ret) {
-        Mutex::Locker locker(m_lock);
+        std::lock_guard locker{m_lock};
         ceph_assert(ret == 0);
         complete_active_action(STATE_UNLOCKED, r);
       }));
@@ -569,7 +569,7 @@ void ManagedLock<I>::revert_to_unlock_state(int r) {
 
 template <typename I>
 void ManagedLock<I>::send_reacquire_lock() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   if (m_state != STATE_LOCKED) {
     complete_active_action(m_state, 0);
@@ -618,7 +618,7 @@ template <typename I>
 void ManagedLock<I>::handle_reacquire_lock(int r) {
   ldout(m_cct, 10) << "r=" << r << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(m_state == STATE_REACQUIRING);
 
   if (r < 0) {
@@ -647,7 +647,7 @@ void ManagedLock<I>::handle_no_op_reacquire_lock(int r) {
 
 template <typename I>
 void ManagedLock<I>::release_acquire_lock() {
-  assert(m_lock.is_locked());
+  assert(ceph_mutex_is_locked(m_lock));
 
   if (!is_state_shutdown()) {
     // queue a release and re-acquire of the lock since cookie cannot
@@ -672,7 +672,7 @@ void ManagedLock<I>::release_acquire_lock() {
 
 template <typename I>
 void ManagedLock<I>::send_release_lock() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   if (m_state == STATE_UNLOCKED) {
     complete_active_action(STATE_UNLOCKED, 0);
     return;
@@ -692,7 +692,7 @@ void ManagedLock<I>::handle_pre_release_lock(int r) {
   ldout(m_cct, 10) << "r=" << r << dendl;
 
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ceph_assert(m_state == STATE_PRE_RELEASING);
     m_state = STATE_RELEASING;
   }
@@ -714,7 +714,7 @@ template <typename I>
 void ManagedLock<I>::handle_release_lock(int r) {
   ldout(m_cct, 10) << "r=" << r << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(m_state == STATE_RELEASING);
 
   if (r >= 0 || r == -EBLACKLISTED || r == -ENOENT) {
@@ -734,14 +734,14 @@ template <typename I>
 void ManagedLock<I>::handle_post_release_lock(int r) {
   ldout(m_cct, 10) << "r=" << r << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   complete_active_action(m_post_next_state, r);
 }
 
 template <typename I>
 void ManagedLock<I>::send_shutdown() {
   ldout(m_cct, 10) << dendl;
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   if (m_state == STATE_UNLOCKED) {
     m_state = STATE_SHUTTING_DOWN;
     m_work_queue->queue(new FunctionContext([this](int r) {
@@ -754,9 +754,9 @@ void ManagedLock<I>::send_shutdown() {
   ceph_assert(m_state == STATE_LOCKED);
   m_state = STATE_PRE_SHUTTING_DOWN;
 
-  m_lock.Unlock();
+  m_lock.unlock();
   m_work_queue->queue(new C_ShutDownRelease(this), 0);
-  m_lock.Lock();
+  m_lock.lock();
 }
 
 template <typename I>
@@ -770,7 +770,7 @@ template <typename I>
 void ManagedLock<I>::send_shutdown_release() {
   ldout(m_cct, 10) << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
 
   m_work_queue->queue(new FunctionContext([this](int r) {
     pre_release_lock_handler(true, create_context_callback<
@@ -784,7 +784,7 @@ void ManagedLock<I>::handle_shutdown_pre_release(int r) {
 
   std::string cookie;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     cookie = m_cookie;
 
     ceph_assert(m_state == STATE_PRE_SHUTTING_DOWN);
@@ -832,8 +832,8 @@ void ManagedLock<I>::complete_shutdown(int r) {
 
   ActionContexts action_contexts;
   {
-    Mutex::Locker locker(m_lock);
-    ceph_assert(m_lock.is_locked());
+    std::lock_guard locker{m_lock};
+    ceph_assert(ceph_mutex_is_locked(m_lock));
     ceph_assert(m_actions_contexts.size() == 1);
 
     action_contexts = std::move(m_actions_contexts.front());
index b27e549dab470db0c7a09230306f7cd5bddd55a2..3e03d00ed2da2c3abf2a63c06f8b2966ff5d35a2 100644 (file)
@@ -8,7 +8,6 @@
 #include "include/Context.h"
 #include "include/rados/librados.hpp"
 #include "common/AsyncOpTracker.h"
-#include "common/Mutex.h"
 #include "cls/lock/cls_lock_types.h"
 #include "librbd/watcher/Types.h"
 #include "librbd/managed_lock/Types.h"
@@ -63,67 +62,67 @@ public:
   int assert_header_locked();
 
   bool is_shutdown() const {
-    Mutex::Locker l(m_lock);
+    std::lock_guard l{m_lock};
     return is_state_shutdown();
   }
 
 protected:
-  mutable Mutex m_lock;
+  mutable ceph::mutex m_lock;
 
   inline void set_state_uninitialized() {
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
     ceph_assert(m_state == STATE_UNLOCKED);
     m_state = STATE_UNINITIALIZED;
   }
   inline void set_state_initializing() {
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
     ceph_assert(m_state == STATE_UNINITIALIZED);
     m_state = STATE_INITIALIZING;
   }
   inline void set_state_unlocked() {
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
     ceph_assert(m_state == STATE_INITIALIZING || m_state == STATE_RELEASING);
     m_state = STATE_UNLOCKED;
   }
   inline void set_state_waiting_for_lock() {
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
     ceph_assert(m_state == STATE_ACQUIRING);
     m_state = STATE_WAITING_FOR_LOCK;
   }
   inline void set_state_post_acquiring() {
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
     ceph_assert(m_state == STATE_ACQUIRING);
     m_state = STATE_POST_ACQUIRING;
   }
 
   bool is_state_shutdown() const;
   inline bool is_state_acquiring() const {
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
     return m_state == STATE_ACQUIRING;
   }
   inline bool is_state_post_acquiring() const {
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
     return m_state == STATE_POST_ACQUIRING;
   }
   inline bool is_state_releasing() const {
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
     return m_state == STATE_RELEASING;
   }
   inline bool is_state_pre_releasing() const {
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
     return m_state == STATE_PRE_RELEASING;
   }
   inline bool is_state_locked() const {
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
     return m_state == STATE_LOCKED;
   }
   inline bool is_state_waiting_for_lock() const {
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
     return m_state == STATE_WAITING_FOR_LOCK;
   }
 
   inline bool is_action_acquire_lock() const {
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
     return get_active_action() == ACTION_ACQUIRE_LOCK;
   }
 
@@ -228,7 +227,7 @@ private:
   ActionsContexts m_actions_contexts;
   AsyncOpTracker m_async_op_tracker;
 
-  bool is_lock_owner(Mutex &lock) const;
+  bool is_lock_owner(ceph::mutex &lock) const;
   bool is_transition_state() const;
 
   void append_context(Action action, Context *ctx);
index ad86a8c7b162e260a69a353f91e5a32c9288919e..b7f3f06e640ae94c5b01fa640c779d1fb9bb27be 100644 (file)
@@ -35,7 +35,7 @@ namespace librbd {
 template <typename I>
 ObjectMap<I>::ObjectMap(I &image_ctx, uint64_t snap_id)
   : m_image_ctx(image_ctx), m_snap_id(snap_id),
-    m_lock(util::unique_lock_name("librbd::ObjectMap::lock", this)),
+    m_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ObjectMap::lock", this))),
     m_update_guard(new UpdateGuard(m_image_ctx.cct)) {
 }
 
@@ -72,7 +72,7 @@ bool ObjectMap<I>::is_compatible(const file_layout_t& layout, uint64_t size) {
 template <typename I>
 uint8_t ObjectMap<I>::operator[](uint64_t object_no) const
 {
-  RWLock::RLocker locker(m_lock);
+  std::shared_lock locker{m_lock};
   ceph_assert(object_no < m_object_map.size());
   return m_object_map[object_no];
 }
@@ -80,7 +80,7 @@ uint8_t ObjectMap<I>::operator[](uint64_t object_no) const
 template <typename I>
 bool ObjectMap<I>::object_may_exist(uint64_t object_no) const
 {
-  ceph_assert(m_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
 
   // Fall back to default logic if object map is disabled or invalid
   if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
@@ -106,7 +106,7 @@ bool ObjectMap<I>::object_may_exist(uint64_t object_no) const
 template <typename I>
 bool ObjectMap<I>::object_may_not_exist(uint64_t object_no) const
 {
-  ceph_assert(m_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
 
   // Fall back to default logic if object map is disabled or invalid
   if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
@@ -132,7 +132,7 @@ bool ObjectMap<I>::object_may_not_exist(uint64_t object_no) const
 template <typename I>
 bool ObjectMap<I>::update_required(const ceph::BitVector<2>::Iterator& it,
                                    uint8_t new_state) {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   uint8_t state = *it;
   if ((state == new_state) ||
       (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
@@ -162,20 +162,20 @@ void ObjectMap<I>::close(Context *on_finish) {
 
 template <typename I>
 bool ObjectMap<I>::set_object_map(ceph::BitVector<2> &target_object_map) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
-  ceph_assert(m_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
   ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
                                         m_image_ctx.image_lock));
-  RWLock::WLocker locker(m_lock);
+  std::unique_lock locker{m_lock};
   m_object_map = target_object_map;
   return true;
 }
 
 template <typename I>
 void ObjectMap<I>::rollback(uint64_t snap_id, Context *on_finish) {
-  ceph_assert(m_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
 
-  RWLock::WLocker locker(m_lock);
+  std::unique_lock locker{m_lock};
   object_map::SnapshotRollbackRequest *req =
     new object_map::SnapshotRollbackRequest(m_image_ctx, snap_id, on_finish);
   req->send();
@@ -183,7 +183,7 @@ void ObjectMap<I>::rollback(uint64_t snap_id, Context *on_finish) {
 
 template <typename I>
 void ObjectMap<I>::snapshot_add(uint64_t snap_id, Context *on_finish) {
-  ceph_assert(m_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
   ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
   ceph_assert(snap_id != CEPH_NOSNAP);
 
@@ -195,7 +195,7 @@ void ObjectMap<I>::snapshot_add(uint64_t snap_id, Context *on_finish) {
 
 template <typename I>
 void ObjectMap<I>::snapshot_remove(uint64_t snap_id, Context *on_finish) {
-  ceph_assert(m_image_ctx.image_lock.is_wlocked());
+  ceph_assert(ceph_mutex_is_wlocked(m_image_ctx.image_lock));
   ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
   ceph_assert(snap_id != CEPH_NOSNAP);
 
@@ -207,11 +207,11 @@ void ObjectMap<I>::snapshot_remove(uint64_t snap_id, Context *on_finish) {
 
 template <typename I>
 void ObjectMap<I>::aio_save(Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
-  ceph_assert(m_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
   ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
                                         m_image_ctx.image_lock));
-  RWLock::RLocker locker(m_lock);
+  std::shared_lock locker{m_lock};
 
   librados::ObjectWriteOperation op;
   if (m_snap_id == CEPH_NOSNAP) {
@@ -230,8 +230,8 @@ void ObjectMap<I>::aio_save(Context *on_finish) {
 template <typename I>
 void ObjectMap<I>::aio_resize(uint64_t new_size, uint8_t default_object_state,
                              Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
-  ceph_assert(m_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
   ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
                                         m_image_ctx.image_lock));
   ceph_assert(m_image_ctx.image_watcher != NULL);
@@ -249,8 +249,8 @@ void ObjectMap<I>::detained_aio_update(UpdateOperation &&op) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << dendl;
 
-  ceph_assert(m_image_ctx.image_lock.is_locked());
-  ceph_assert(m_lock.is_wlocked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
+  ceph_assert(ceph_mutex_is_wlocked(m_lock));
 
   BlockGuardCell *cell;
   int r = m_update_guard->detain({op.start_object_no, op.end_object_no},
@@ -290,8 +290,8 @@ void ObjectMap<I>::handle_detained_aio_update(BlockGuardCell *cell, int r,
   m_update_guard->release(cell, &block_ops);
 
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
-    RWLock::WLocker locker(m_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
+    std::unique_lock locker{m_lock};
     for (auto &op : block_ops) {
       detained_aio_update(std::move(op));
     }
@@ -306,7 +306,7 @@ void ObjectMap<I>::aio_update(uint64_t snap_id, uint64_t start_object_no,
                               const boost::optional<uint8_t> &current_state,
                               const ZTracer::Trace &parent_trace,
                               bool ignore_enoent, Context *on_finish) {
-  ceph_assert(m_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
   ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
   ceph_assert(m_image_ctx.image_watcher != nullptr);
   ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
@@ -320,7 +320,7 @@ void ObjectMap<I>::aio_update(uint64_t snap_id, uint64_t start_object_no,
                        stringify(static_cast<uint32_t>(*current_state)) : "")
                 << "->" << static_cast<uint32_t>(new_state) << dendl;
   if (snap_id == CEPH_NOSNAP) {
-    ceph_assert(m_lock.is_wlocked());
+    ceph_assert(ceph_mutex_is_wlocked(m_lock));
     end_object_no = std::min(end_object_no, m_object_map.size());
     if (start_object_no >= end_object_no) {
       ldout(cct, 20) << "skipping update of invalid object map" << dendl;
index 3f7cdd04e0152fdaff203216e9b2547cc06409ea..0d4264887bc2b57fa05d983c62e1ec4bf2808c94 100644 (file)
@@ -40,13 +40,13 @@ public:
 
   uint8_t operator[](uint64_t object_no) const;
   inline uint64_t size() const {
-    RWLock::RLocker locker(m_lock);
+    std::shared_lock locker{m_lock};
     return m_object_map.size();
   }
 
   inline void set_state(uint64_t object_no, uint8_t new_state,
                         const boost::optional<uint8_t> &current_state) {
-    RWLock::WLocker locker(m_lock);
+    std::unique_lock locker{m_lock};
     ceph_assert(object_no < m_object_map.size());
     if (current_state && m_object_map[object_no] != *current_state) {
       return;
@@ -81,7 +81,7 @@ public:
                   const ZTracer::Trace &parent_trace, bool ignore_enoent,
                   T *callback_object) {
     ceph_assert(start_object_no < end_object_no);
-    RWLock::WLocker locker(m_lock);
+    std::unique_lock locker{m_lock};
 
     if (snap_id == CEPH_NOSNAP) {
       end_object_no = std::min(end_object_no, m_object_map.size());
@@ -146,7 +146,7 @@ private:
   ImageCtxT &m_image_ctx;
   uint64_t m_snap_id;
 
-  RWLock m_lock;
+  mutable ceph::shared_mutex m_lock;
   ceph::BitVector<2> m_object_map;
 
   UpdateGuard *m_update_guard = nullptr;
index eaee012350e1b8eb8b9c7cfe2fa4221235a5ca2e..cfb9b4467f5f38fd822514723f8cb7a28efddfbb 100644 (file)
@@ -176,24 +176,24 @@ struct C_InvokeAsyncRequest : public Context {
 
   void send_acquire_exclusive_lock() {
     // context can complete before owner_lock is unlocked
-    RWLock &owner_lock(image_ctx.owner_lock);
-    owner_lock.get_read();
-    image_ctx.image_lock.get_read();
+    ceph::shared_mutex &owner_lock(image_ctx.owner_lock);
+    owner_lock.lock_shared();
+    image_ctx.image_lock.lock_shared();
     if (image_ctx.read_only ||
         (!permit_snapshot && image_ctx.snap_id != CEPH_NOSNAP)) {
-      image_ctx.image_lock.put_read();
-      owner_lock.put_read();
+      image_ctx.image_lock.unlock_shared();
+      owner_lock.unlock_shared();
       complete(-EROFS);
       return;
     }
-    image_ctx.image_lock.put_read();
+    image_ctx.image_lock.unlock_shared();
 
     if (image_ctx.exclusive_lock == nullptr) {
       send_local_request();
-      owner_lock.put_read();
+      owner_lock.unlock_shared();
       return;
     } else if (image_ctx.image_watcher == nullptr) {
-      owner_lock.put_read();
+      owner_lock.unlock_shared();
       complete(-EROFS);
       return;
     }
@@ -201,7 +201,7 @@ struct C_InvokeAsyncRequest : public Context {
     if (image_ctx.exclusive_lock->is_lock_owner() &&
         image_ctx.exclusive_lock->accept_requests()) {
       send_local_request();
-      owner_lock.put_read();
+      owner_lock.unlock_shared();
       return;
     }
 
@@ -221,7 +221,7 @@ struct C_InvokeAsyncRequest : public Context {
     } else {
       image_ctx.exclusive_lock->try_acquire_lock(ctx);
     }
-    owner_lock.put_read();
+    owner_lock.unlock_shared();
   }
 
   void handle_acquire_exclusive_lock(int r) {
@@ -234,21 +234,21 @@ struct C_InvokeAsyncRequest : public Context {
     }
 
     // context can complete before owner_lock is unlocked
-    RWLock &owner_lock(image_ctx.owner_lock);
-    owner_lock.get_read();
+    ceph::shared_mutex &owner_lock(image_ctx.owner_lock);
+    owner_lock.lock_shared();
     if (image_ctx.exclusive_lock == nullptr ||
         image_ctx.exclusive_lock->is_lock_owner()) {
       send_local_request();
-      owner_lock.put_read();
+      owner_lock.unlock_shared();
       return;
     }
 
     send_remote_request();
-    owner_lock.put_read();
+    owner_lock.unlock_shared();
   }
 
   void send_remote_request() {
-    ceph_assert(image_ctx.owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
     CephContext *cct = image_ctx.cct;
     ldout(cct, 20) << __func__ << dendl;
@@ -283,7 +283,7 @@ struct C_InvokeAsyncRequest : public Context {
   }
 
   void send_local_request() {
-    ceph_assert(image_ctx.owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
     CephContext *cct = image_ctx.cct;
     ldout(cct, 20) << __func__ << dendl;
@@ -348,7 +348,7 @@ int Operations<I>::flatten(ProgressContext &prog_ctx) {
   }
 
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     if (m_image_ctx.parent_md.spec.pool_id == -1) {
       lderr(cct) << "image has no parent" << dendl;
       return -EINVAL;
@@ -373,7 +373,7 @@ int Operations<I>::flatten(ProgressContext &prog_ctx) {
 template <typename I>
 void Operations<I>::execute_flatten(ProgressContext &prog_ctx,
                                     Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
               m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -385,18 +385,18 @@ void Operations<I>::execute_flatten(ProgressContext &prog_ctx,
     return;
   }
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
 
   // can't flatten a non-clone
   if (m_image_ctx.parent_md.spec.pool_id == -1) {
     lderr(cct) << "image has no parent" << dendl;
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-EINVAL);
     return;
   }
   if (m_image_ctx.snap_id != CEPH_NOSNAP) {
     lderr(cct) << "snapshots cannot be flattened" << dendl;
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-EROFS);
     return;
   }
@@ -411,7 +411,7 @@ void Operations<I>::execute_flatten(ProgressContext &prog_ctx,
   uint64_t overlap_objects = Striper::get_num_objects(m_image_ctx.layout,
                                                       overlap);
 
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   operation::FlattenRequest<I> *req = new operation::FlattenRequest<I>(
     m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), overlap_objects,
@@ -447,7 +447,7 @@ int Operations<I>::rebuild_object_map(ProgressContext &prog_ctx) {
 template <typename I>
 void Operations<I>::execute_rebuild_object_map(ProgressContext &prog_ctx,
                                                Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
               m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -494,7 +494,7 @@ template <typename I>
 void Operations<I>::object_map_iterate(ProgressContext &prog_ctx,
                                       operation::ObjectIterateWork<I> handle_mismatch,
                                       Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
               m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -545,7 +545,7 @@ int Operations<I>::rename(const char *dstname) {
   } else {
     C_SaferCond cond_ctx;
     {
-      RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+      std::shared_lock owner_lock{m_image_ctx.owner_lock};
       execute_rename(dstname, &cond_ctx);
     }
 
@@ -562,7 +562,7 @@ int Operations<I>::rename(const char *dstname) {
 template <typename I>
 void Operations<I>::execute_rename(const std::string &dest_name,
                                    Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
     ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
                 m_image_ctx.exclusive_lock->is_lock_owner());
@@ -573,13 +573,13 @@ void Operations<I>::execute_rename(const std::string &dest_name,
     return;
   }
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   if (m_image_ctx.name == dest_name) {
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-EEXIST);
     return;
   }
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << ": dest_name=" << dest_name
@@ -595,7 +595,7 @@ void Operations<I>::execute_rename(const std::string &dest_name,
        m_image_ctx.image_watcher->register_watch(on_finish);
       });
     on_finish = new FunctionContext([this, dest_name, on_finish](int r) {
-        RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+        std::shared_lock owner_locker{m_image_ctx.owner_lock};
        operation::RenameRequest<I> *req = new operation::RenameRequest<I>(
          m_image_ctx, on_finish, dest_name);
        req->send();
@@ -612,11 +612,11 @@ template <typename I>
 int Operations<I>::resize(uint64_t size, bool allow_shrink, ProgressContext& prog_ctx) {
   CephContext *cct = m_image_ctx.cct;
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   ldout(cct, 5) << this << " " << __func__ << ": "
                 << "size=" << m_image_ctx.size << ", "
                 << "new_size=" << size << dendl;
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   int r = m_image_ctx.state->refresh_if_required();
   if (r < 0) {
@@ -646,29 +646,29 @@ template <typename I>
 void Operations<I>::execute_resize(uint64_t size, bool allow_shrink, ProgressContext &prog_ctx,
                                    Context *on_finish,
                                    uint64_t journal_op_tid) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
               m_image_ctx.exclusive_lock->is_lock_owner());
 
   CephContext *cct = m_image_ctx.cct;
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   ldout(cct, 5) << this << " " << __func__ << ": "
                 << "size=" << m_image_ctx.size << ", "
                 << "new_size=" << size << dendl;
 
   if (m_image_ctx.snap_id != CEPH_NOSNAP || m_image_ctx.read_only ||
       m_image_ctx.operations_disabled) {
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-EROFS);
     return;
   } else if (m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
                                        m_image_ctx.image_lock) &&
              !ObjectMap<>::is_compatible(m_image_ctx.layout, size)) {
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-EINVAL);
     return;
   }
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   operation::ResizeRequest<I> *req = new operation::ResizeRequest<I>(
     m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), size, allow_shrink,
@@ -713,13 +713,13 @@ void Operations<I>::snap_create(const cls::rbd::SnapshotNamespace &snap_namespac
     return;
   }
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   if (m_image_ctx.get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP) {
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-EEXIST);
     return;
   }
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   C_InvokeAsyncRequest<I> *req = new C_InvokeAsyncRequest<I>(
     m_image_ctx, "snap_create", true,
@@ -737,7 +737,7 @@ void Operations<I>::execute_snap_create(const cls::rbd::SnapshotNamespace &snap_
                                         Context *on_finish,
                                         uint64_t journal_op_tid,
                                         bool skip_object_map) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
               m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -750,13 +750,13 @@ void Operations<I>::execute_snap_create(const cls::rbd::SnapshotNamespace &snap_
     return;
   }
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   if (m_image_ctx.get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP) {
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-EEXIST);
     return;
   }
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   operation::SnapshotCreateRequest<I> *req =
     new operation::SnapshotCreateRequest<I>(
@@ -779,10 +779,10 @@ int Operations<I>::snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespa
 
   C_SaferCond cond_ctx;
   {
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+    std::shared_lock owner_locker{m_image_ctx.owner_lock};
     {
       // need to drop image_lock before invalidating cache
-      RWLock::RLocker image_locker(m_image_ctx.image_lock);
+      std::shared_lock image_locker{m_image_ctx.image_lock};
       if (!m_image_ctx.snap_exists) {
         return -ENOENT;
       }
@@ -820,7 +820,7 @@ void Operations<I>::execute_snap_rollback(const cls::rbd::SnapshotNamespace& sna
                                          const std::string &snap_name,
                                           ProgressContext& prog_ctx,
                                           Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
                 << dendl;
@@ -830,17 +830,17 @@ void Operations<I>::execute_snap_rollback(const cls::rbd::SnapshotNamespace& sna
     return;
   }
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name);
   if (snap_id == CEPH_NOSNAP) {
     lderr(cct) << "No such snapshot found." << dendl;
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-ENOENT);
     return;
   }
 
   uint64_t new_size = m_image_ctx.get_image_size(snap_id);
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   // async mode used for journal replay
   operation::SnapshotRollbackRequest<I> *request =
@@ -888,16 +888,16 @@ void Operations<I>::snap_remove(const cls::rbd::SnapshotNamespace& snap_namespac
   }
 
   // quickly filter out duplicate ops
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   if (m_image_ctx.get_snap_id(snap_namespace, snap_name) == CEPH_NOSNAP) {
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-ENOENT);
     return;
   }
 
   bool proxy_op = ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0 ||
                    (m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0);
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   if (proxy_op) {
     C_InvokeAsyncRequest<I> *req = new C_InvokeAsyncRequest<I>(
@@ -908,7 +908,7 @@ void Operations<I>::snap_remove(const cls::rbd::SnapshotNamespace& snap_namespac
       {-ENOENT}, on_finish);
     req->send();
   } else {
-    RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+    std::shared_lock owner_lock{m_image_ctx.owner_lock};
     execute_snap_remove(snap_namespace, snap_name, on_finish);
   }
 }
@@ -917,7 +917,7 @@ template <typename I>
 void Operations<I>::execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
                                        const std::string &snap_name,
                                         Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   {
     if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) {
       ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
@@ -934,11 +934,11 @@ void Operations<I>::execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_
     return;
   }
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name);
   if (snap_id == CEPH_NOSNAP) {
     lderr(m_image_ctx.cct) << "No such snapshot found." << dendl;
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-ENOENT);
     return;
   }
@@ -946,16 +946,16 @@ void Operations<I>::execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_
   bool is_protected;
   int r = m_image_ctx.is_snap_protected(snap_id, &is_protected);
   if (r < 0) {
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(r);
     return;
   } else if (is_protected) {
     lderr(m_image_ctx.cct) << "snapshot is protected" << dendl;
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-EBUSY);
     return;
   }
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   operation::SnapshotRemoveRequest<I> *req =
     new operation::SnapshotRemoveRequest<I>(
@@ -981,7 +981,7 @@ int Operations<I>::snap_rename(const char *srcname, const char *dstname) {
     return r;
 
   {
-    RWLock::RLocker l(m_image_ctx.image_lock);
+    std::shared_lock l{m_image_ctx.image_lock};
     snap_id = m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(), srcname);
     if (snap_id == CEPH_NOSNAP) {
       return -ENOENT;
@@ -1004,7 +1004,7 @@ int Operations<I>::snap_rename(const char *srcname, const char *dstname) {
   } else {
     C_SaferCond cond_ctx;
     {
-      RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+      std::shared_lock owner_lock{m_image_ctx.owner_lock};
       execute_snap_rename(snap_id, dstname, &cond_ctx);
     }
 
@@ -1022,7 +1022,7 @@ template <typename I>
 void Operations<I>::execute_snap_rename(const uint64_t src_snap_id,
                                         const std::string &dest_snap_name,
                                         Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   if ((m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
     ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
                 m_image_ctx.exclusive_lock->is_lock_owner());
@@ -1033,15 +1033,15 @@ void Operations<I>::execute_snap_rename(const uint64_t src_snap_id,
     return;
   }
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   if (m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(),
                              dest_snap_name) != CEPH_NOSNAP) {
     // Renaming is supported for snapshots from user namespace only.
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-EEXIST);
     return;
   }
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << ": "
@@ -1077,7 +1077,7 @@ int Operations<I>::snap_protect(const cls::rbd::SnapshotNamespace& snap_namespac
   }
 
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     bool is_protected;
     r = m_image_ctx.is_snap_protected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
                                       &is_protected);
@@ -1103,7 +1103,7 @@ int Operations<I>::snap_protect(const cls::rbd::SnapshotNamespace& snap_namespac
   } else {
     C_SaferCond cond_ctx;
     {
-      RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+      std::shared_lock owner_lock{m_image_ctx.owner_lock};
       execute_snap_protect(snap_namespace, snap_name, &cond_ctx);
     }
 
@@ -1119,7 +1119,7 @@ template <typename I>
 void Operations<I>::execute_snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
                                         const std::string &snap_name,
                                          Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
     ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
                 m_image_ctx.exclusive_lock->is_lock_owner());
@@ -1130,20 +1130,20 @@ void Operations<I>::execute_snap_protect(const cls::rbd::SnapshotNamespace& snap
     return;
   }
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   bool is_protected;
   int r = m_image_ctx.is_snap_protected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
                                         &is_protected);
   if (r < 0) {
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(r);
     return;
   } else if (is_protected) {
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-EBUSY);
     return;
   }
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
@@ -1172,7 +1172,7 @@ int Operations<I>::snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namesp
   }
 
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     bool is_unprotected;
     r = m_image_ctx.is_snap_unprotected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
                                   &is_unprotected);
@@ -1198,7 +1198,7 @@ int Operations<I>::snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namesp
   } else {
     C_SaferCond cond_ctx;
     {
-      RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+      std::shared_lock owner_lock{m_image_ctx.owner_lock};
       execute_snap_unprotect(snap_namespace, snap_name, &cond_ctx);
     }
 
@@ -1214,7 +1214,7 @@ template <typename I>
 void Operations<I>::execute_snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
                                           const std::string &snap_name,
                                            Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
     ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
                 m_image_ctx.exclusive_lock->is_lock_owner());
@@ -1225,20 +1225,20 @@ void Operations<I>::execute_snap_unprotect(const cls::rbd::SnapshotNamespace& sn
     return;
   }
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   bool is_unprotected;
   int r = m_image_ctx.is_snap_unprotected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
                                           &is_unprotected);
   if (r < 0) {
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(r);
     return;
   } else if (is_unprotected) {
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-EINVAL);
     return;
   }
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
@@ -1266,7 +1266,7 @@ int Operations<I>::snap_set_limit(uint64_t limit) {
 
   C_SaferCond limit_ctx;
   {
-    RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+    std::shared_lock owner_lock{m_image_ctx.owner_lock};
     r = prepare_image_update(true);
     if (r < 0) {
       return r;
@@ -1282,7 +1282,7 @@ int Operations<I>::snap_set_limit(uint64_t limit) {
 template <typename I>
 void Operations<I>::execute_snap_set_limit(const uint64_t limit,
                                           Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << ": limit=" << limit
@@ -1334,7 +1334,7 @@ int Operations<I>::update_features(uint64_t features, bool enabled) {
     return -EINVAL;
   }
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     if (enabled && (features & m_image_ctx.features) != 0) {
       lderr(cct) << "one or more requested features are already enabled"
                 << dendl;
@@ -1351,13 +1351,13 @@ int Operations<I>::update_features(uint64_t features, bool enabled) {
   // when acquiring the exclusive lock in case the journal is corrupt
   bool disabling_journal = false;
   if (!enabled && ((features & RBD_FEATURE_JOURNALING) != 0)) {
-    RWLock::WLocker image_locker(m_image_ctx.image_lock);
+    std::unique_lock image_locker{m_image_ctx.image_lock};
     m_image_ctx.set_journal_policy(new journal::DisabledPolicy());
     disabling_journal = true;
   }
   BOOST_SCOPE_EXIT_ALL( (this)(disabling_journal) ) {
     if (disabling_journal) {
-      RWLock::WLocker image_locker(m_image_ctx.image_lock);
+      std::unique_lock image_locker{m_image_ctx.image_lock};
       m_image_ctx.set_journal_policy(
         new journal::StandardPolicy<I>(&m_image_ctx));
     }
@@ -1370,7 +1370,7 @@ int Operations<I>::update_features(uint64_t features, bool enabled) {
   if (enabled && (features & RBD_FEATURE_JOURNALING) != 0) {
     C_SaferCond cond_ctx;
     {
-      RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+      std::shared_lock owner_lock{m_image_ctx.owner_lock};
       r = prepare_image_update(true);
       if (r < 0) {
         return r;
@@ -1396,7 +1396,7 @@ template <typename I>
 void Operations<I>::execute_update_features(uint64_t features, bool enabled,
                                             Context *on_finish,
                                             uint64_t journal_op_tid) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
               m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -1455,7 +1455,7 @@ int Operations<I>::metadata_set(const std::string &key,
 
   C_SaferCond metadata_ctx;
   {
-    RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+    std::shared_lock owner_lock{m_image_ctx.owner_lock};
     r = prepare_image_update(true);
     if (r < 0) {
       return r;
@@ -1477,7 +1477,7 @@ template <typename I>
 void Operations<I>::execute_metadata_set(const std::string &key,
                                        const std::string &value,
                                        Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << ": key=" << key << ", value="
@@ -1516,7 +1516,7 @@ int Operations<I>::metadata_remove(const std::string &key) {
 
   C_SaferCond metadata_ctx;
   {
-    RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+    std::shared_lock owner_lock{m_image_ctx.owner_lock};
     r = prepare_image_update(true);
     if (r < 0) {
       return r;
@@ -1539,7 +1539,7 @@ int Operations<I>::metadata_remove(const std::string &key) {
 template <typename I>
 void Operations<I>::execute_metadata_remove(const std::string &key,
                                            Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << ": key=" << key << dendl;
@@ -1571,7 +1571,7 @@ int Operations<I>::migrate(ProgressContext &prog_ctx) {
   }
 
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     if (m_image_ctx.migration_info.empty()) {
       lderr(cct) << "image has no migrating parent" << dendl;
       return -EINVAL;
@@ -1596,7 +1596,7 @@ int Operations<I>::migrate(ProgressContext &prog_ctx) {
 template <typename I>
 void Operations<I>::execute_migrate(ProgressContext &prog_ctx,
                                     Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
               m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -1608,22 +1608,22 @@ void Operations<I>::execute_migrate(ProgressContext &prog_ctx,
     return;
   }
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
 
   if (m_image_ctx.migration_info.empty()) {
     lderr(cct) << "image has no migrating parent" << dendl;
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-EINVAL);
     return;
   }
   if (m_image_ctx.snap_id != CEPH_NOSNAP) {
     lderr(cct) << "snapshots cannot be migrated" << dendl;
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     on_finish->complete(-EROFS);
     return;
   }
 
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   operation::MigrateRequest<I> *req = new operation::MigrateRequest<I>(
     m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), prog_ctx);
@@ -1662,7 +1662,7 @@ template <typename I>
 void Operations<I>::execute_sparsify(size_t sparse_size,
                                      ProgressContext &prog_ctx,
                                      Context *on_finish) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
               m_image_ctx.exclusive_lock->is_lock_owner());
 
@@ -1682,18 +1682,17 @@ void Operations<I>::execute_sparsify(size_t sparse_size,
 
 template <typename I>
 int Operations<I>::prepare_image_update(bool request_lock) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked() &&
-              !m_image_ctx.owner_lock.is_wlocked());
+  ceph_assert(ceph_mutex_is_rlocked(m_image_ctx.owner_lock));
   if (m_image_ctx.image_watcher == nullptr) {
     return -EROFS;
   }
 
   // need to upgrade to a write lock
   C_SaferCond ctx;
-  m_image_ctx.owner_lock.put_read();
+  m_image_ctx.owner_lock.unlock_shared();
   bool attempting_lock = false;
   {
-    RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
+    std::unique_lock owner_locker{m_image_ctx.owner_lock};
     if (m_image_ctx.exclusive_lock != nullptr &&
         (!m_image_ctx.exclusive_lock->is_lock_owner() ||
          !m_image_ctx.exclusive_lock->accept_requests())) {
@@ -1714,7 +1713,7 @@ int Operations<I>::prepare_image_update(bool request_lock) {
     r = ctx.wait();
   }
 
-  m_image_ctx.owner_lock.get_read();
+  m_image_ctx.owner_lock.lock_shared();
   if (attempting_lock && m_image_ctx.exclusive_lock != nullptr) {
     m_image_ctx.exclusive_lock->unblock_requests();
   }
index 410b8ee88e4718d1e0adc00cb78c18b74005e0c3..67507efd0e41415a4706dbc38784ed8619dde4ba 100644 (file)
@@ -6,7 +6,7 @@
 #include "include/Context.h"
 #include "common/ceph_context.h"
 #include "common/Finisher.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "common/Timer.h"
 #include <map>
 #include <utility>
@@ -16,12 +16,11 @@ class CephContext;
 namespace librbd {
 
 struct TaskFinisherSingleton {
-  Mutex m_lock;
+  ceph::mutex m_lock = ceph::make_mutex("librbd::TaskFinisher::m_lock");
   SafeTimer *m_safe_timer;
   Finisher *m_finisher;
 
-  explicit TaskFinisherSingleton(CephContext *cct)
-    : m_lock("librbd::TaskFinisher::m_lock") {
+  explicit TaskFinisherSingleton(CephContext *cct) {
     m_safe_timer = new SafeTimer(cct, m_lock, false);
     m_safe_timer->init();
     m_finisher = new Finisher(cct, "librbd::TaskFinisher::m_finisher", "taskfin_librbd");
@@ -29,7 +28,7 @@ struct TaskFinisherSingleton {
   }
   virtual ~TaskFinisherSingleton() {
     {
-      Mutex::Locker l(m_lock);
+      std::lock_guard l{m_lock};
       m_safe_timer->shutdown();
       delete m_safe_timer;
     }
@@ -53,7 +52,7 @@ public:
   }
 
   void cancel(const Task& task) {
-    Mutex::Locker l(*m_lock);
+    std::lock_guard l{*m_lock};
     typename TaskContexts::iterator it = m_task_contexts.find(task);
     if (it != m_task_contexts.end()) {
       delete it->second.first;
@@ -64,7 +63,7 @@ public:
 
   void cancel_all(Context *comp) {
     {
-      Mutex::Locker l(*m_lock);
+      std::lock_guard l{*m_lock};
       for (typename TaskContexts::iterator it = m_task_contexts.begin();
            it != m_task_contexts.end(); ++it) {
         delete it->second.first;
@@ -76,7 +75,7 @@ public:
   }
 
   bool add_event_after(const Task& task, double seconds, Context *ctx) {
-    Mutex::Locker l(*m_lock);
+    std::lock_guard l{*m_lock};
     if (m_task_contexts.count(task) != 0) {
       // task already scheduled on finisher or timer
       delete ctx;
@@ -94,7 +93,7 @@ public:
   }
 
   bool queue(const Task& task, Context *ctx) {
-    Mutex::Locker l(*m_lock);
+    std::lock_guard l{*m_lock};
     typename TaskContexts::iterator it = m_task_contexts.find(task);
     if (it != m_task_contexts.end()) {
       if (it->second.second != NULL) {
@@ -130,7 +129,7 @@ private:
 
   CephContext &m_cct;
 
-  Mutex *m_lock;
+  ceph::mutex *m_lock;
   Finisher *m_finisher;
   SafeTimer *m_safe_timer;
 
@@ -140,7 +139,7 @@ private:
   void complete(const Task& task) {
     Context *ctx = NULL;
     {
-      Mutex::Locker l(*m_lock);
+      std::lock_guard l{*m_lock};
       typename TaskContexts::iterator it = m_task_contexts.find(task);
       if (it != m_task_contexts.end()) {
         ctx = it->second.first;
index c02598983e705174627f153710a8a5ef441a2279..f13e02d5834f260bb06c230cbe8f0071b3283226 100644 (file)
@@ -91,20 +91,20 @@ Watcher::Watcher(librados::IoCtx& ioctx, ContextWQ *work_queue,
                           const string& oid)
   : m_ioctx(ioctx), m_work_queue(work_queue), m_oid(oid),
     m_cct(reinterpret_cast<CephContext *>(ioctx.cct())),
-    m_watch_lock(util::unique_lock_name("librbd::Watcher::m_watch_lock", this)),
+    m_watch_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::Watcher::m_watch_lock", this))),
     m_watch_handle(0), m_notifier(work_queue, ioctx, oid),
     m_watch_state(WATCH_STATE_IDLE), m_watch_ctx(*this) {
 }
 
 Watcher::~Watcher() {
-  RWLock::RLocker l(m_watch_lock);
+  std::shared_lock l{m_watch_lock};
   ceph_assert(is_unregistered(m_watch_lock));
 }
 
 void Watcher::register_watch(Context *on_finish) {
   ldout(m_cct, 10) << dendl;
 
-  RWLock::WLocker watch_locker(m_watch_lock);
+  std::unique_lock watch_locker{m_watch_lock};
   ceph_assert(is_unregistered(m_watch_lock));
   m_watch_state = WATCH_STATE_REGISTERING;
   m_watch_blacklisted = false;
@@ -122,7 +122,7 @@ void Watcher::handle_register_watch(int r, Context *on_finish) {
   bool watch_error = false;
   Context *unregister_watch_ctx = nullptr;
   {
-    RWLock::WLocker watch_locker(m_watch_lock);
+    std::unique_lock watch_locker{m_watch_lock};
     ceph_assert(m_watch_state == WATCH_STATE_REGISTERING);
 
     m_watch_state = WATCH_STATE_IDLE;
@@ -156,7 +156,7 @@ void Watcher::unregister_watch(Context *on_finish) {
   ldout(m_cct, 10) << dendl;
 
   {
-    RWLock::WLocker watch_locker(m_watch_lock);
+    std::unique_lock watch_locker{m_watch_lock};
     if (m_watch_state != WATCH_STATE_IDLE) {
       ldout(m_cct, 10) << "delaying unregister until register completed"
                        << dendl;
@@ -183,7 +183,7 @@ void Watcher::unregister_watch(Context *on_finish) {
 }
 
 bool Watcher::notifications_blocked() const {
-  RWLock::RLocker locker(m_watch_lock);
+  std::shared_lock locker{m_watch_lock};
 
   bool blocked = (m_blocked_count > 0);
   ldout(m_cct, 5) << "blocked=" << blocked << dendl;
@@ -192,7 +192,7 @@ bool Watcher::notifications_blocked() const {
 
 void Watcher::block_notifies(Context *on_finish) {
   {
-    RWLock::WLocker locker(m_watch_lock);
+    std::unique_lock locker{m_watch_lock};
     ++m_blocked_count;
     ldout(m_cct, 5) << "blocked_count=" << m_blocked_count << dendl;
   }
@@ -200,7 +200,7 @@ void Watcher::block_notifies(Context *on_finish) {
 }
 
 void Watcher::unblock_notifies() {
-  RWLock::WLocker locker(m_watch_lock);
+  std::unique_lock locker{m_watch_lock};
   ceph_assert(m_blocked_count > 0);
   --m_blocked_count;
   ldout(m_cct, 5) << "blocked_count=" << m_blocked_count << dendl;
@@ -211,12 +211,12 @@ void Watcher::flush(Context *on_finish) {
 }
 
 std::string Watcher::get_oid() const {
-  RWLock::RLocker locker(m_watch_lock);
+  std::shared_lock locker{m_watch_lock};
   return m_oid;
 }
 
 void Watcher::set_oid(const string& oid) {
-  RWLock::WLocker watch_locker(m_watch_lock);
+  std::unique_lock watch_locker{m_watch_lock};
   ceph_assert(is_unregistered(m_watch_lock));
 
   m_oid = oid;
@@ -225,7 +225,7 @@ void Watcher::set_oid(const string& oid) {
 void Watcher::handle_error(uint64_t handle, int err) {
   lderr(m_cct) << "handle=" << handle << ": " << cpp_strerror(err) << dendl;
 
-  RWLock::WLocker watch_locker(m_watch_lock);
+  std::unique_lock watch_locker{m_watch_lock};
   m_watch_error = true;
 
   if (is_registered(m_watch_lock)) {
@@ -250,7 +250,7 @@ void Watcher::rewatch() {
 
   Context *unregister_watch_ctx = nullptr;
   {
-    RWLock::WLocker watch_locker(m_watch_lock);
+    std::unique_lock watch_locker{m_watch_lock};
     ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
 
     if (m_unregister_watch_ctx != nullptr) {
@@ -276,7 +276,7 @@ void Watcher::handle_rewatch(int r) {
   bool watch_error = false;
   Context *unregister_watch_ctx = nullptr;
   {
-    RWLock::WLocker watch_locker(m_watch_lock);
+    std::unique_lock watch_locker{m_watch_lock};
     ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
 
     m_watch_blacklisted = false;
@@ -318,7 +318,7 @@ void Watcher::handle_rewatch_callback(int r) {
   bool watch_error = false;
   Context *unregister_watch_ctx = nullptr;
   {
-    RWLock::WLocker watch_locker(m_watch_lock);
+    std::unique_lock watch_locker{m_watch_lock};
     ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
 
     if (m_unregister_watch_ctx != nullptr) {
index 69e87ad30ce99cc5a26f39c7ed126f7827f141a5..e2889605010ca7fc9e19fdd6a7640560c1801acb 100644 (file)
@@ -5,7 +5,7 @@
 #define CEPH_LIBRBD_WATCHER_H
 
 #include "common/AsyncOpTracker.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "common/RWLock.h"
 #include "include/rados/librados.hpp"
 #include "librbd/watcher/Notifier.h"
@@ -48,20 +48,20 @@ public:
   void set_oid(const string& oid);
 
   uint64_t get_watch_handle() const {
-    RWLock::RLocker watch_locker(m_watch_lock);
+    std::shared_lock watch_locker{m_watch_lock};
     return m_watch_handle;
   }
 
   bool is_registered() const {
-    RWLock::RLocker locker(m_watch_lock);
+    std::shared_lock locker{m_watch_lock};
     return is_registered(m_watch_lock);
   }
   bool is_unregistered() const {
-    RWLock::RLocker locker(m_watch_lock);
+    std::shared_lock locker{m_watch_lock};
     return is_unregistered(m_watch_lock);
   }
   bool is_blacklisted() const {
-    RWLock::RLocker locker(m_watch_lock);
+    std::shared_lock locker{m_watch_lock};
     return m_watch_blacklisted;
   }
 
@@ -76,7 +76,7 @@ protected:
   ContextWQ *m_work_queue;
   std::string m_oid;
   CephContext *m_cct;
-  mutable RWLock m_watch_lock;
+  mutable ceph::shared_mutex m_watch_lock;
   uint64_t m_watch_handle;
   watcher::Notifier m_notifier;
 
@@ -85,10 +85,10 @@ protected:
 
   AsyncOpTracker m_async_op_tracker;
 
-  bool is_registered(const RWLock&) const {
+  bool is_registered(const ceph::shared_mutex&) const {
     return (m_watch_state == WATCH_STATE_IDLE && m_watch_handle != 0);
   }
-  bool is_unregistered(const RWLock&) const {
+  bool is_unregistered(const ceph::shared_mutex&) const {
     return (m_watch_state == WATCH_STATE_IDLE && m_watch_handle == 0);
   }
 
index a45180b0b004afecd602fe55980b91b4d4896a7b..86be5b4e948a2e21211dc21bdbc12276b3a8ebbc 100644 (file)
@@ -13,6 +13,7 @@
 #include "include/rados/librados.hpp"
 #include "include/interval_set.h"
 #include "common/errno.h"
+#include "common/Cond.h"
 #include "common/Throttle.h"
 #include "osdc/Striper.h"
 #include "librados/snap_set_diff.h"
@@ -241,7 +242,7 @@ int DiffIterate<I>::diff_iterate(I *ictx,
   // ensure previous writes are visible to listsnaps
   C_SaferCond flush_ctx;
   {
-    RWLock::RLocker owner_locker(ictx->owner_lock);
+    std::shared_lock owner_locker{ictx->owner_lock};
     auto aio_comp = io::AioCompletion::create_and_start(&flush_ctx, ictx,
                                                         io::AIO_TYPE_FLUSH);
     auto req = io::ImageDispatchSpec<I>::create_flush_request(
@@ -259,9 +260,9 @@ int DiffIterate<I>::diff_iterate(I *ictx,
     return r;
   }
 
-  ictx->image_lock.get_read();
+  ictx->image_lock.lock_shared();
   r = clip_io(ictx, off, &len);
-  ictx->image_lock.put_read();
+  ictx->image_lock.unlock_shared();
   if (r < 0) {
     return r;
   }
@@ -282,7 +283,7 @@ int DiffIterate<I>::execute() {
   uint64_t from_size = 0;
   uint64_t end_size;
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     head_ctx.dup(m_image_ctx.data_ctx);
     if (m_from_snap_name) {
       from_snap_id = m_image_ctx.get_snap_id(m_from_snap_namespace, m_from_snap_name);
@@ -307,7 +308,7 @@ int DiffIterate<I>::execute() {
   bool fast_diff_enabled = false;
   BitVector<2> object_diff_state;
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     if (m_whole_object && (m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) {
       r = diff_object_map(from_snap_id, end_snap_id, &object_diff_state);
       if (r < 0) {
@@ -330,7 +331,7 @@ int DiffIterate<I>::execute() {
   DiffContext diff_context(m_image_ctx, m_callback, m_callback_arg,
                            m_whole_object, from_snap_id, end_snap_id);
   if (m_include_parent && from_snap_id == 0) {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     uint64_t overlap = 0;
     m_image_ctx.get_parent_overlap(m_image_ctx.snap_id, &overlap);
     r = 0;
@@ -409,7 +410,7 @@ int DiffIterate<I>::execute() {
 template <typename I>
 int DiffIterate<I>::diff_object_map(uint64_t from_snap_id, uint64_t to_snap_id,
                                     BitVector<2>* object_diff_state) {
-  ceph_assert(m_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
   CephContext* cct = m_image_ctx.cct;
 
   bool diff_from_start = (from_snap_id == 0);
index 04f155b5189b4bb3e411098f74ba0231dc5b13d5..aa341bc25328a7de36dbb145f7520ac88529eb71 100644 (file)
@@ -38,7 +38,7 @@ namespace {
 template <typename I>
 snap_t get_group_snap_id(I* ictx,
                          const cls::rbd::SnapshotNamespace& in_snap_namespace) {
-  ceph_assert(ictx->image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(ictx->image_lock));
   auto it = ictx->snap_ids.lower_bound({in_snap_namespace, ""});
   if (it != ictx->snap_ids.end() && it->first.first == in_snap_namespace) {
     return it->second;
@@ -261,10 +261,10 @@ int group_snap_remove_by_record(librados::IoCtx& group_ioctx,
     on_finishes[i] = new C_SaferCond;
 
     std::string snap_name;
-    ictx->image_lock.get_read();
+    ictx->image_lock.lock_shared();
     snap_t snap_id = get_group_snap_id(ictx, ne);
     r = ictx->get_snap_name(snap_id, &snap_name);
-    ictx->image_lock.put_read();
+    ictx->image_lock.unlock_shared();
 
     if (r >= 0) {
       ldout(cct, 20) << "removing individual snapshot from image " << ictx->name
@@ -363,14 +363,14 @@ int group_snap_rollback_by_record(librados::IoCtx& group_ioctx,
 
   ldout(cct, 20) << "Requesting exclusive locks for images" << dendl;
   for (auto ictx: ictxs) {
-    RWLock::RLocker owner_lock(ictx->owner_lock);
+    std::shared_lock owner_lock{ictx->owner_lock};
     if (ictx->exclusive_lock != nullptr) {
       ictx->exclusive_lock->block_requests(-EBUSY);
     }
   }
   for (int i = 0; i < snap_count; ++i) {
     ImageCtx *ictx = ictxs[i];
-    RWLock::RLocker owner_lock(ictx->owner_lock);
+    std::shared_lock owner_lock{ictx->owner_lock};
 
     on_finishes[i] = new C_SaferCond;
     if (ictx->exclusive_lock != nullptr) {
@@ -398,12 +398,12 @@ int group_snap_rollback_by_record(librados::IoCtx& group_ioctx,
     ImageCtx *ictx = ictxs[i];
     on_finishes[i] = new C_SaferCond;
 
-    RWLock::RLocker owner_locker(ictx->owner_lock);
+    std::shared_lock owner_locker{ictx->owner_lock};
     std::string snap_name;
-    ictx->image_lock.get_read();
+    ictx->image_lock.lock_shared();
     snap_t snap_id = get_group_snap_id(ictx, ne);
     r = ictx->get_snap_name(snap_id, &snap_name);
-    ictx->image_lock.put_read();
+    ictx->image_lock.unlock_shared();
 
     if (r >= 0) {
       ldout(cct, 20) << "rolling back to individual snapshot for image " << ictx->name
@@ -928,14 +928,14 @@ int Group<I>::snap_create(librados::IoCtx& group_ioctx,
   ldout(cct, 20) << "Requesting exclusive locks for images" << dendl;
 
   for (auto ictx: ictxs) {
-    RWLock::RLocker owner_lock(ictx->owner_lock);
+    std::shared_lock owner_lock{ictx->owner_lock};
     if (ictx->exclusive_lock != nullptr) {
       ictx->exclusive_lock->block_requests(-EBUSY);
     }
   }
   for (int i = 0; i < image_count; ++i) {
     ImageCtx *ictx = ictxs[i];
-    RWLock::RLocker owner_lock(ictx->owner_lock);
+    std::shared_lock owner_lock{ictx->owner_lock};
 
     on_finishes[i] = new C_SaferCond;
     if (ictx->exclusive_lock != nullptr) {
@@ -980,9 +980,9 @@ int Group<I>::snap_create(librados::IoCtx& group_ioctx,
       ret_code = r;
     } else {
       ImageCtx *ictx = ictxs[i];
-      ictx->image_lock.get_read();
+      ictx->image_lock.lock_shared();
       snap_t snap_id = get_group_snap_id(ictx, ne);
-      ictx->image_lock.put_read();
+      ictx->image_lock.unlock_shared();
       if (snap_id == CEPH_NOSNAP) {
        ldout(cct, 20) << "Couldn't find created snapshot with namespace: "
                        << ne << dendl;
@@ -1018,10 +1018,10 @@ remove_image_snaps:
 
     on_finishes[i] = new C_SaferCond;
     std::string snap_name;
-    ictx->image_lock.get_read();
+    ictx->image_lock.lock_shared();
     snap_t snap_id = get_group_snap_id(ictx, ne);
     r = ictx->get_snap_name(snap_id, &snap_name);
-    ictx->image_lock.put_read();
+    ictx->image_lock.unlock_shared();
     if (r >= 0) {
       ictx->operations->snap_remove(ne, snap_name.c_str(), on_finishes[i]);
     } else {
index 78140565386ebe96335aad1cb7d870006a83cfb5..69e9f80b29c38a881dc9571d87091e4979c6c524 100644 (file)
@@ -86,7 +86,7 @@ int Image<I>::get_op_features(I *ictx, uint64_t *op_features) {
     return r;
   }
 
-  RWLock::RLocker image_locker(ictx->image_lock);
+  std::shared_lock image_locker{ictx->image_lock};
   *op_features = ictx->op_features;
   return 0;
 }
@@ -200,12 +200,12 @@ int Image<I>::get_parent(I *ictx,
     return r;
   }
 
-  RWLock::RLocker image_locker(ictx->image_lock);
+  std::shared_lock image_locker{ictx->image_lock};
 
   bool release_image_lock = false;
   BOOST_SCOPE_EXIT_ALL(ictx, &release_image_lock) {
     if (release_image_lock) {
-      ictx->parent->image_lock.put_read();
+      ictx->parent->image_lock.unlock_shared();
     }
   };
 
@@ -214,7 +214,7 @@ int Image<I>::get_parent(I *ictx,
   auto parent = ictx->parent;
   if (!ictx->migration_info.empty() && ictx->parent != nullptr) {
     release_image_lock = true;
-    ictx->parent->image_lock.get_read();
+    ictx->parent->image_lock.lock_shared();
 
     parent = ictx->parent->parent;
   }
@@ -227,7 +227,7 @@ int Image<I>::get_parent(I *ictx,
   parent_image->pool_name = parent->md_ctx.get_pool_name();
   parent_image->pool_namespace = parent->md_ctx.get_namespace();
 
-  RWLock::RLocker parent_image_locker(parent->image_lock);
+  std::shared_lock parent_image_locker{parent->image_lock};
   parent_snap->id = parent->snap_id;
   parent_snap->namespace_type = RBD_SNAP_NAMESPACE_TYPE_USER;
   if (parent->snap_id != CEPH_NOSNAP) {
@@ -309,7 +309,7 @@ template <typename I>
 int Image<I>::list_descendants(
     I *ictx, const std::optional<size_t> &max_level,
     std::vector<librbd::linked_image_spec_t> *images) {
-  RWLock::RLocker l(ictx->image_lock);
+  std::shared_lock l{ictx->image_lock};
   std::vector<librados::snap_t> snap_ids;
   if (ictx->snap_id != CEPH_NOSNAP) {
     snap_ids.push_back(ictx->snap_id);
@@ -516,7 +516,7 @@ int Image<I>::deep_copy(I *src, librados::IoCtx& dest_md_ctx,
   uint64_t features;
   uint64_t src_size;
   {
-    RWLock::RLocker image_locker(src->image_lock);
+    std::shared_lock image_locker{src->image_lock};
 
     if (!src->migration_info.empty()) {
       lderr(cct) << "cannot deep copy migrating image" << dendl;
@@ -563,7 +563,7 @@ int Image<I>::deep_copy(I *src, librados::IoCtx& dest_md_ctx,
   if (flatten > 0) {
     parent_spec.pool_id = -1;
   } else {
-    RWLock::RLocker image_locker(src->image_lock);
+    std::shared_lock image_locker{src->image_lock};
 
     // use oldest snapshot or HEAD for parent spec
     if (!src->snap_info.empty()) {
@@ -610,7 +610,7 @@ int Image<I>::deep_copy(I *src, librados::IoCtx& dest_md_ctx,
 
   C_SaferCond lock_ctx;
   {
-    RWLock::WLocker locker(dest->owner_lock);
+    std::unique_lock locker{dest->owner_lock};
 
     if (dest->exclusive_lock == nullptr ||
         dest->exclusive_lock->is_lock_owner()) {
@@ -644,7 +644,7 @@ int Image<I>::deep_copy(I *src, I *dest, bool flatten,
   librados::snap_t snap_id_start = 0;
   librados::snap_t snap_id_end;
   {
-    RWLock::RLocker image_locker(src->image_lock);
+    std::shared_lock image_locker{src->image_lock};
     snap_id_end = src->snap_id;
   }
 
@@ -680,7 +680,7 @@ int Image<I>::snap_set(I *ictx,
   uint64_t snap_id = CEPH_NOSNAP;
   std::string name(snap_name == nullptr ? "" : snap_name);
   if (!name.empty()) {
-    RWLock::RLocker image_locker(ictx->image_lock);
+    std::shared_lock image_locker{ictx->image_lock};
     snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace{},
                                 snap_name);
     if (snap_id == CEPH_NOSNAP) {
index b0d80c94a49bc91c38a019bc4432ea87f0bb7cf7..b77039e70971b6485d01df83378019022016238a 100644 (file)
@@ -6,6 +6,7 @@
 #include "include/stringify.h"
 #include "common/dout.h"
 #include "common/errno.h"
+#include "common/Cond.h"
 #include "cls/rbd/cls_rbd_client.h"
 #include "librbd/ExclusiveLock.h"
 #include "librbd/ImageCtx.h"
@@ -62,8 +63,9 @@ public:
                            ProgressContext *prog_ctx)
     : m_io_ctx(io_ctx), m_header_oid(header_oid), m_state(state),
       m_prog_ctx(prog_ctx), m_cct(reinterpret_cast<CephContext*>(io_ctx.cct())),
-      m_lock(util::unique_lock_name("librbd::api::MigrationProgressContext",
-                                    this)) {
+      m_lock(ceph::make_mutex(
+       util::unique_lock_name("librbd::api::MigrationProgressContext",
+                              this))) {
     ceph_assert(m_prog_ctx != nullptr);
   }
 
@@ -90,14 +92,14 @@ private:
   ProgressContext *m_prog_ctx;
 
   CephContext* m_cct;
-  mutable Mutex m_lock;
-  Cond m_cond;
+  mutable ceph::mutex m_lock;
+  ceph::condition_variable m_cond;
   std::string m_state_description;
   bool m_pending_update = false;
   int m_in_flight_state_updates = 0;
 
   void send_state_description_update(const std::string &description) {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
 
     if (description == m_state_description) {
       return;
@@ -116,7 +118,7 @@ private:
   void set_state_description() {
     ldout(m_cct, 20) << "state_description=" << m_state_description << dendl;
 
-    ceph_assert(m_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(m_lock));
 
     librados::ObjectWriteOperation op;
     cls_client::migration_set_state(&op, m_state, m_state_description);
@@ -134,7 +136,7 @@ private:
   void handle_set_state_description(int r) {
     ldout(m_cct, 20) << "r=" << r << dendl;
 
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
 
     m_in_flight_state_updates--;
 
@@ -145,20 +147,17 @@ private:
       set_state_description();
       m_pending_update = false;
     } else {
-      m_cond.Signal();
+      m_cond.notify_all();
     }
   }
 
   void wait_for_in_flight_updates() {
-    Mutex::Locker locker(m_lock);
+    std::unique_lock locker{m_lock};
 
     ldout(m_cct, 20) << "m_in_flight_state_updates="
                      << m_in_flight_state_updates << dendl;
-
     m_pending_update = false;
-    while (m_in_flight_state_updates > 0) {
-      m_cond.Wait(m_lock);
-    }
+    m_cond.wait(locker, [this] { return m_in_flight_state_updates <= 0; });
   }
 };
 
@@ -389,7 +388,7 @@ int Migration<I>::prepare(librados::IoCtx& io_ctx,
 
   uint64_t features;
   {
-    RWLock::RLocker image_locker(image_ctx->image_lock);
+    std::shared_lock image_locker{image_ctx->image_lock};
     features = image_ctx->features;
   }
   opts.get(RBD_IMAGE_OPTION_FEATURES, &features);
@@ -718,7 +717,7 @@ int Migration<I>::execute() {
                                       m_prog_ctx);
     r = dst_image_ctx->operations->migrate(prog_ctx);
     if (r == -EROFS) {
-      RWLock::RLocker owner_locker(dst_image_ctx->owner_lock);
+      std::shared_lock owner_locker{dst_image_ctx->owner_lock};
       if (dst_image_ctx->exclusive_lock != nullptr &&
           !dst_image_ctx->exclusive_lock->accept_ops()) {
         ldout(m_cct, 5) << "lost exclusive lock, retrying remote" << dendl;
@@ -750,12 +749,12 @@ int Migration<I>::abort() {
 
   int r;
 
-  m_src_image_ctx->owner_lock.get_read();
+  m_src_image_ctx->owner_lock.lock_shared();
   if (m_src_image_ctx->exclusive_lock != nullptr &&
       !m_src_image_ctx->exclusive_lock->is_lock_owner()) {
     C_SaferCond ctx;
     m_src_image_ctx->exclusive_lock->acquire_lock(&ctx);
-    m_src_image_ctx->owner_lock.put_read();
+    m_src_image_ctx->owner_lock.unlock_shared();
     r = ctx.wait();
     if (r < 0) {
       lderr(m_cct) << "error acquiring exclusive lock: " << cpp_strerror(r)
@@ -763,7 +762,7 @@ int Migration<I>::abort() {
       return r;
     }
   } else {
-    m_src_image_ctx->owner_lock.put_read();
+    m_src_image_ctx->owner_lock.unlock_shared();
   }
 
   group_info_t group_info;
@@ -1026,7 +1025,7 @@ int Migration<I>::validate_src_snaps() {
   }
 
   for (auto &snap : snaps) {
-    RWLock::RLocker image_locker(m_src_image_ctx->image_lock);
+    std::shared_lock image_locker{m_src_image_ctx->image_lock};
     cls::rbd::ParentImageSpec parent_spec{m_src_image_ctx->md_ctx.get_id(),
                                           m_src_image_ctx->md_ctx.get_namespace(),
                                           m_src_image_ctx->id, snap.id};
@@ -1121,12 +1120,12 @@ template <typename I>
 int Migration<I>::v2_unlink_src_image() {
   ldout(m_cct, 10) << dendl;
 
-  m_src_image_ctx->owner_lock.get_read();
+  m_src_image_ctx->owner_lock.lock_shared();
   if (m_src_image_ctx->exclusive_lock != nullptr &&
       m_src_image_ctx->exclusive_lock->is_lock_owner()) {
     C_SaferCond ctx;
     m_src_image_ctx->exclusive_lock->release_lock(&ctx);
-    m_src_image_ctx->owner_lock.put_read();
+    m_src_image_ctx->owner_lock.unlock_shared();
     int r = ctx.wait();
      if (r < 0) {
       lderr(m_cct) << "error releasing exclusive lock: " << cpp_strerror(r)
@@ -1134,7 +1133,7 @@ int Migration<I>::v2_unlink_src_image() {
       return r;
      }
   } else {
-    m_src_image_ctx->owner_lock.put_read();
+    m_src_image_ctx->owner_lock.unlock_shared();
   }
 
   int r = Trash<I>::move(m_src_io_ctx, RBD_TRASH_IMAGE_SOURCE_MIGRATION,
@@ -1193,7 +1192,7 @@ int Migration<I>::create_dst_image() {
   uint64_t size;
   cls::rbd::ParentImageSpec parent_spec;
   {
-    RWLock::RLocker image_locker(m_src_image_ctx->image_lock);
+    std::shared_lock image_locker{m_src_image_ctx->image_lock};
     size = m_src_image_ctx->size;
 
     // use oldest snapshot or HEAD for parent spec
@@ -1256,7 +1255,7 @@ int Migration<I>::create_dst_image() {
   } BOOST_SCOPE_EXIT_END;
 
   {
-    RWLock::RLocker owner_locker(dst_image_ctx->owner_lock);
+    std::shared_lock owner_locker{dst_image_ctx->owner_lock};
     r = dst_image_ctx->operations->prepare_image_update(true);
     if (r < 0) {
       lderr(m_cct) << "cannot obtain exclusive lock" << dendl;
@@ -1541,7 +1540,7 @@ int Migration<I>::relink_children(I *from_image_ctx, I *to_image_ctx) {
       // Also collect the list of the children currently attached to the
       // source, so we could make a proper decision later about relinking.
 
-      RWLock::RLocker src_image_locker(to_image_ctx->image_lock);
+      std::shared_lock src_image_locker{to_image_ctx->image_lock};
       cls::rbd::ParentImageSpec src_parent_spec{to_image_ctx->md_ctx.get_id(),
                                                 to_image_ctx->md_ctx.get_namespace(),
                                                 to_image_ctx->id, snap.id};
@@ -1553,7 +1552,7 @@ int Migration<I>::relink_children(I *from_image_ctx, I *to_image_ctx) {
         return r;
       }
 
-      RWLock::RLocker image_locker(from_image_ctx->image_lock);
+      std::shared_lock image_locker{from_image_ctx->image_lock};
       snap.id = from_image_ctx->get_snap_id(cls::rbd::UserSnapshotNamespace(),
                                             snap.name);
       if (snap.id == CEPH_NOSNAP) {
@@ -1564,7 +1563,7 @@ int Migration<I>::relink_children(I *from_image_ctx, I *to_image_ctx) {
 
     std::vector<librbd::linked_image_spec_t> child_images;
     {
-      RWLock::RLocker image_locker(from_image_ctx->image_lock);
+      std::shared_lock image_locker{from_image_ctx->image_lock};
       cls::rbd::ParentImageSpec parent_spec{from_image_ctx->md_ctx.get_id(),
                                             from_image_ctx->md_ctx.get_namespace(),
                                             from_image_ctx->id, snap.id};
@@ -1614,7 +1613,7 @@ int Migration<I>::relink_child(I *from_image_ctx, I *to_image_ctx,
 
   librados::snap_t to_snap_id;
   {
-    RWLock::RLocker image_locker(to_image_ctx->image_lock);
+    std::shared_lock image_locker{to_image_ctx->image_lock};
     to_snap_id = to_image_ctx->get_snap_id(cls::rbd::UserSnapshotNamespace(),
                                              from_snap.name);
     if (to_snap_id == CEPH_NOSNAP) {
@@ -1652,7 +1651,7 @@ int Migration<I>::relink_child(I *from_image_ctx, I *to_image_ctx,
   cls::rbd::ParentImageSpec parent_spec;
   uint64_t parent_overlap;
   {
-    RWLock::RLocker image_locker(child_image_ctx->image_lock);
+    std::shared_lock image_locker{child_image_ctx->image_lock};
 
     // use oldest snapshot or HEAD for parent spec
     if (!child_image_ctx->snap_info.empty()) {
index ebdf6acef479430f9f38f1abe6e2388426cb5374..4e67c5123fd8e900aa2bb6cc5f68c0340e67d833 100644 (file)
@@ -195,7 +195,7 @@ int Mirror<I>::image_enable(I *ictx, bool relax_same_pool_parent_check) {
 
   // is mirroring not enabled for the parent?
   {
-    RWLock::RLocker image_locker(ictx->image_lock);
+    std::shared_lock image_locker{ictx->image_lock};
     ImageCtx *parent = ictx->parent;
     if (parent) {
       if (relax_same_pool_parent_check &&
@@ -305,7 +305,7 @@ int Mirror<I>::image_disable(I *ictx, bool force) {
     };
 
     {
-      RWLock::RLocker l(ictx->image_lock);
+      std::shared_lock l{ictx->image_lock};
       map<librados::snap_t, SnapInfo> snap_info = ictx->snap_info;
       for (auto &info : snap_info) {
         cls::rbd::ParentImageSpec parent_spec{ictx->md_ctx.get_id(),
index 88adf561d200a3dd188f79db87345ecea5121768..708f797ec698e943a997584fbfc22a4cfb56083a 100644 (file)
@@ -5,6 +5,7 @@
 #include "include/rados/librados.hpp"
 #include "common/dout.h"
 #include "common/errno.h"
+#include "common/Cond.h"
 #include "common/Throttle.h"
 #include "cls/rbd/cls_rbd_client.h"
 #include "osd/osd_types.h"
index a3c314c9762616f7f2701f8133ceaed9896c85f0..4d24f91a61594474b2fc0d4c1f1f26b0a8397594 100644 (file)
@@ -105,7 +105,7 @@ int Snapshot<I>::get_group_namespace(I *ictx, uint64_t snap_id,
     return r;
   }
 
-  RWLock::RLocker image_locker(ictx->image_lock);
+  std::shared_lock image_locker{ictx->image_lock};
   auto snap_info = ictx->get_snap_info(snap_id);
   if (snap_info == nullptr) {
     return -ENOENT;
@@ -128,7 +128,7 @@ int Snapshot<I>::get_trash_namespace(I *ictx, uint64_t snap_id,
     return r;
   }
 
-  RWLock::RLocker image_locker(ictx->image_lock);
+  std::shared_lock image_locker{ictx->image_lock};
   auto snap_info = ictx->get_snap_info(snap_id);
   if (snap_info == nullptr) {
     return -ENOENT;
@@ -151,7 +151,7 @@ int Snapshot<I>::get_namespace_type(I *ictx, uint64_t snap_id,
     return r;
   }
 
-  RWLock::RLocker l(ictx->image_lock);
+  std::shared_lock l{ictx->image_lock};
   auto snap_info = ictx->get_snap_info(snap_id);
   if (snap_info == nullptr) {
     return -ENOENT;
@@ -174,7 +174,7 @@ int Snapshot<I>::remove(I *ictx, uint64_t snap_id) {
   cls::rbd::SnapshotNamespace snapshot_namespace;
   std::string snapshot_name;
   {
-    RWLock::RLocker image_locker(ictx->image_lock);
+    std::shared_lock image_locker{ictx->image_lock};
     auto it = ictx->snap_info.find(snap_id);
     if (it == ictx->snap_info.end()) {
       return -ENOENT;
index b8deddfb2bda459bd55d41f7b092efb78a027ab6..c384d534f7d03afdf1d056ee0f6db5cc19764b12 100644 (file)
@@ -137,32 +137,32 @@ int Trash<I>::move(librados::IoCtx &io_ctx, rbd_trash_image_source_t source,
 
   if (r == 0) {
     if (ictx->test_features(RBD_FEATURE_JOURNALING)) {
-      RWLock::WLocker image_locker(ictx->image_lock);
+      std::unique_lock image_locker{ictx->image_lock};
       ictx->set_journal_policy(new journal::DisabledPolicy());
     }
 
-    ictx->owner_lock.get_read();
+    ictx->owner_lock.lock_shared();
     if (ictx->exclusive_lock != nullptr) {
       ictx->exclusive_lock->block_requests(0);
 
       r = ictx->operations->prepare_image_update(false);
       if (r < 0) {
         lderr(cct) << "cannot obtain exclusive lock - not removing" << dendl;
-        ictx->owner_lock.put_read();
+        ictx->owner_lock.unlock_shared();
         ictx->state->close();
         return -EBUSY;
       }
     }
-    ictx->owner_lock.put_read();
+    ictx->owner_lock.unlock_shared();
 
-    ictx->image_lock.get_read();
+    ictx->image_lock.lock_shared();
     if (!ictx->migration_info.empty()) {
       lderr(cct) << "cannot move migrating image to trash" << dendl;
-      ictx->image_lock.put_read();
+      ictx->image_lock.unlock_shared();
       ictx->state->close();
       return -EBUSY;
     }
-    ictx->image_lock.put_read();
+    ictx->image_lock.unlock_shared();
 
     r = disable_mirroring<I>(ictx);
     if (r < 0) {
index 5e6f9dcf5b7bb486dc24a139a2ba2aa12ecbc49d..0b57201bbbb88efe7fd0a26d753bccd0156dab21 100644 (file)
@@ -45,7 +45,7 @@ struct ObjectCacherObjectDispatch<I>::C_InvalidateCache : public Context {
   }
 
   void finish(int r) override {
-    ceph_assert(dispatcher->m_cache_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(dispatcher->m_cache_lock));
     auto cct = dispatcher->m_image_ctx->cct;
 
     if (r == -EBLACKLISTED) {
@@ -80,8 +80,8 @@ ObjectCacherObjectDispatch<I>::ObjectCacherObjectDispatch(
     I* image_ctx, size_t max_dirty, bool writethrough_until_flush)
   : m_image_ctx(image_ctx), m_max_dirty(max_dirty),
     m_writethrough_until_flush(writethrough_until_flush),
-    m_cache_lock(util::unique_lock_name(
-      "librbd::cache::ObjectCacherObjectDispatch::cache_lock", this)) {
+    m_cache_lock(ceph::make_mutex(util::unique_lock_name(
+      "librbd::cache::ObjectCacherObjectDispatch::cache_lock", this))) {
 }
 
 template <typename I>
@@ -97,7 +97,7 @@ void ObjectCacherObjectDispatch<I>::init() {
   auto cct = m_image_ctx->cct;
   ldout(cct, 5) << dendl;
 
-  m_cache_lock.Lock();
+  m_cache_lock.lock();
   ldout(cct, 5) << "enabling caching..." << dendl;
   m_writeback_handler = new ObjectCacherWriteback(m_image_ctx, m_cache_lock);
 
@@ -144,7 +144,7 @@ void ObjectCacherObjectDispatch<I>::init() {
   m_object_set = new ObjectCacher::ObjectSet(nullptr,
                                              m_image_ctx->data_ctx.get_id(), 0);
   m_object_cacher->start();
-  m_cache_lock.Unlock();
+  m_cache_lock.unlock();
 
   // add ourself to the IO object dispatcher chain
   if (m_max_dirty > 0) {
@@ -173,10 +173,9 @@ void ObjectCacherObjectDispatch<I>::shut_down(Context* on_finish) {
   on_finish = new C_InvalidateCache(this, true, on_finish);
 
   // flush all pending writeback state
-  m_cache_lock.Lock();
+  std::lock_guard locker{m_cache_lock};
   m_object_cacher->release_set(m_object_set);
   m_object_cacher->flush_set(m_object_set, on_finish);
-  m_cache_lock.Unlock();
 }
 
 template <typename I>
@@ -195,9 +194,9 @@ bool ObjectCacherObjectDispatch<I>::read(
   on_dispatched = util::create_async_context_callback(*m_image_ctx,
                                                       on_dispatched);
 
-  m_image_ctx->image_lock.get_read();
+  m_image_ctx->image_lock.lock_shared();
   auto rd = m_object_cacher->prepare_read(snap_id, read_data, op_flags);
-  m_image_ctx->image_lock.put_read();
+  m_image_ctx->image_lock.unlock_shared();
 
   ObjectExtent extent(data_object_name(m_image_ctx, object_no), object_no,
                       object_off, object_len, 0);
@@ -208,9 +207,9 @@ bool ObjectCacherObjectDispatch<I>::read(
   ZTracer::Trace trace(parent_trace);
   *dispatch_result = io::DISPATCH_RESULT_COMPLETE;
 
-  m_cache_lock.Lock();
+  m_cache_lock.lock();
   int r = m_object_cacher->readx(rd, m_object_set, on_dispatched, &trace);
-  m_cache_lock.Unlock();
+  m_cache_lock.unlock();
   if (r != 0) {
     on_dispatched->complete(r);
   }
@@ -237,9 +236,9 @@ bool ObjectCacherObjectDispatch<I>::discard(
   auto ctx = *on_finish;
   *on_finish = new FunctionContext(
     [this, object_extents, ctx](int r) {
-      m_cache_lock.Lock();
+      m_cache_lock.lock();
       m_object_cacher->discard_set(m_object_set, object_extents);
-      m_cache_lock.Unlock();
+      m_cache_lock.unlock();
 
       ctx->complete(r);
     });
@@ -252,10 +251,9 @@ bool ObjectCacherObjectDispatch<I>::discard(
 
   // ensure any in-flight writeback is complete before advancing
   // the discard request
-  m_cache_lock.Lock();
+  std::lock_guard locker{m_cache_lock};
   m_object_cacher->discard_writeback(m_object_set, object_extents,
                                      on_dispatched);
-  m_cache_lock.Unlock();
   return true;
 }
 
@@ -274,10 +272,10 @@ bool ObjectCacherObjectDispatch<I>::write(
   on_dispatched = util::create_async_context_callback(*m_image_ctx,
                                                       on_dispatched);
 
-  m_image_ctx->image_lock.get_read();
+  m_image_ctx->image_lock.lock_shared();
   ObjectCacher::OSDWrite *wr = m_object_cacher->prepare_write(
     snapc, data, ceph::real_time::min(), op_flags, *journal_tid);
-  m_image_ctx->image_lock.put_read();
+  m_image_ctx->image_lock.unlock_shared();
 
   ObjectExtent extent(data_object_name(m_image_ctx, object_no),
                       object_no, object_off, data.length(), 0);
@@ -288,9 +286,8 @@ bool ObjectCacherObjectDispatch<I>::write(
   ZTracer::Trace trace(parent_trace);
   *dispatch_result = io::DISPATCH_RESULT_COMPLETE;
 
-  m_cache_lock.Lock();
+  std::lock_guard locker{m_cache_lock};
   m_object_cacher->writex(wr, m_object_set, on_dispatched, &trace);
-  m_cache_lock.Unlock();
   return true;
 }
 
@@ -345,7 +342,7 @@ bool ObjectCacherObjectDispatch<I>::compare_and_write(
   object_extents.emplace_back(data_object_name(m_image_ctx, object_no),
                               object_no, object_off, cmp_data.length(), 0);
 
-  Mutex::Locker cache_locker(m_cache_lock);
+  std::lock_guard cache_locker{m_cache_lock};
   m_object_cacher->flush_set(m_object_set, object_extents, &trace,
                              on_dispatched);
   return true;
@@ -363,7 +360,7 @@ bool ObjectCacherObjectDispatch<I>::flush(
   on_dispatched = util::create_async_context_callback(*m_image_ctx,
                                                       on_dispatched);
 
-  m_cache_lock.Lock();
+  std::lock_guard locker{m_cache_lock};
   if (flush_source == io::FLUSH_SOURCE_USER && !m_user_flushed) {
     m_user_flushed = true;
     if (m_writethrough_until_flush && m_max_dirty > 0) {
@@ -374,7 +371,6 @@ bool ObjectCacherObjectDispatch<I>::flush(
 
   *dispatch_result = io::DISPATCH_RESULT_CONTINUE;
   m_object_cacher->flush_set(m_object_set, on_dispatched);
-  m_cache_lock.Unlock();
   return true;
 }
 
@@ -389,10 +385,9 @@ bool ObjectCacherObjectDispatch<I>::invalidate_cache(Context* on_finish) {
   // invalidate any remaining cache entries
   on_finish = new C_InvalidateCache(this, false, on_finish);
 
-  m_cache_lock.Lock();
+  std::lock_guard locker{m_cache_lock};
   m_object_cacher->release_set(m_object_set);
   m_object_cacher->flush_set(m_object_set, on_finish);
-  m_cache_lock.Unlock();
   return true;
 }
 
@@ -402,10 +397,8 @@ bool ObjectCacherObjectDispatch<I>::reset_existence_cache(
   auto cct = m_image_ctx->cct;
   ldout(cct, 5) << dendl;
 
-  m_cache_lock.Lock();
+  std::lock_guard locker{m_cache_lock};
   m_object_cacher->clear_nonexistence(m_object_set);
-  m_cache_lock.Unlock();
-
   return false;
 }
 
index 22a9cf3586f4b913660bc711668be43ed03d4fbd..2c62e31f07ce694bf093767b8b0c12b38f34a092 100644 (file)
@@ -5,7 +5,7 @@
 #define CEPH_LIBRBD_CACHE_OBJECT_CACHER_OBJECT_DISPATCH_H
 
 #include "librbd/io/ObjectDispatchInterface.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "osdc/ObjectCacher.h"
 
 struct WritebackHandler;
@@ -99,7 +99,7 @@ private:
   size_t m_max_dirty;
   bool m_writethrough_until_flush;
 
-  Mutex m_cache_lock;
+  ceph::mutex m_cache_lock;
   ObjectCacher *m_object_cacher = nullptr;
   ObjectCacher::ObjectSet *m_object_set = nullptr;
 
index a59203b322089f9970720c574ac6703a559c2079..c6e26506a3a1b386ba36754c0181fbf499dd5f49 100644 (file)
@@ -6,7 +6,7 @@
 #include "librbd/cache/ObjectCacherWriteback.h"
 #include "common/ceph_context.h"
 #include "common/dout.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "common/WorkQueue.h"
 #include "osdc/Striper.h"
 #include "include/Context.h"
@@ -42,13 +42,13 @@ namespace cache {
  */
 class C_ReadRequest : public Context {
 public:
-  C_ReadRequest(CephContext *cct, Context *c, Mutex *cache_lock)
+  C_ReadRequest(CephContext *cct, Context *c, ceph::mutex *cache_lock)
     : m_cct(cct), m_ctx(c), m_cache_lock(cache_lock) {
   }
   void finish(int r) override {
     ldout(m_cct, 20) << "aio_cb completing " << dendl;
     {
-      Mutex::Locker cache_locker(*m_cache_lock);
+      std::lock_guard cache_locker{*m_cache_lock};
       m_ctx->complete(r);
     }
     ldout(m_cct, 20) << "aio_cb finished" << dendl;
@@ -56,7 +56,7 @@ public:
 private:
   CephContext *m_cct;
   Context *m_ctx;
-  Mutex *m_cache_lock;
+  ceph::mutex *m_cache_lock;
 };
 
 class C_OrderedWrite : public Context {
@@ -69,7 +69,7 @@ public:
   void finish(int r) override {
     ldout(m_cct, 20) << "C_OrderedWrite completing " << m_result << dendl;
     {
-      Mutex::Locker l(m_wb_handler->m_lock);
+      std::lock_guard l{m_wb_handler->m_lock};
       ceph_assert(!m_result->done);
       m_result->done = true;
       m_result->ret = r;
@@ -105,7 +105,7 @@ struct C_CommitIOEventExtent : public Context {
   }
 };
 
-ObjectCacherWriteback::ObjectCacherWriteback(ImageCtx *ictx, Mutex& lock)
+ObjectCacherWriteback::ObjectCacherWriteback(ImageCtx *ictx, ceph::mutex& lock)
   : m_tid(0), m_lock(lock), m_ictx(ictx) {
 }
 
@@ -147,11 +147,11 @@ bool ObjectCacherWriteback::may_copy_on_write(const object_t& oid,
                                               uint64_t read_len,
                                               snapid_t snapid)
 {
-  m_ictx->image_lock.get_read();
+  m_ictx->image_lock.lock_shared();
   librados::snap_t snap_id = m_ictx->snap_id;
   uint64_t overlap = 0;
   m_ictx->get_parent_overlap(snap_id, &overlap);
-  m_ictx->image_lock.put_read();
+  m_ictx->image_lock.unlock_shared();
 
   uint64_t object_no = oid_to_object_no(oid.name, m_ictx->object_prefix);
 
@@ -244,7 +244,7 @@ void ObjectCacherWriteback::overwrite_extent(const object_t& oid, uint64_t off,
 
 void ObjectCacherWriteback::complete_writes(const std::string& oid)
 {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   std::queue<write_result_d*>& results = m_writes[oid];
   ldout(m_ictx->cct, 20) << "complete_writes() oid " << oid << dendl;
   std::list<write_result_d*> finished;
index 2addaba5bdf44d7428b1ac81a1078c5366df4216..6d7f367b614fc149af1820431c112112d8f7c0e8 100644 (file)
@@ -9,7 +9,6 @@
 #include "osdc/WritebackHandler.h"
 #include <queue>
 
-class Mutex;
 class Context;
 
 namespace librbd {
@@ -20,7 +19,7 @@ namespace cache {
 
 class ObjectCacherWriteback : public WritebackHandler {
 public:
-  ObjectCacherWriteback(ImageCtx *ictx, Mutex& lock);
+  ObjectCacherWriteback(ImageCtx *ictx, ceph::mutex& lock);
 
   // Note that oloc, trunc_size, and trunc_seq are ignored
   void read(const object_t& oid, uint64_t object_no,
@@ -64,7 +63,7 @@ private:
   void complete_writes(const std::string& oid);
 
   ceph_tid_t m_tid;
-  Mutex& m_lock;
+  ceph::mutex& m_lock;
   librbd::ImageCtx *m_ictx;
   ceph::unordered_map<std::string, std::queue<write_result_d*> > m_writes;
   friend class C_OrderedWrite;
index d576659a1a27277a022fcd09a7f0ffb46da8ce2a..475a5ad15fe9ecacdfe5a84d2990d3f392309f55 100644 (file)
@@ -4,7 +4,6 @@
 #ifndef CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H
 #define CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H
 
-#include "common/Mutex.h"
 #include "librbd/io/ObjectDispatchInterface.h"
 #include "tools/immutable_object_cache/CacheClient.h"
 #include "librbd/cache/TypeTraits.h"
index 88e9e218453f84d8d3a0d563e392d3492b9b6b85..06eab207bcd1976b436417ee31a91992cd9820d7 100644 (file)
@@ -24,8 +24,8 @@ template <typename I>
 WriteAroundObjectDispatch<I>::WriteAroundObjectDispatch(
     I* image_ctx, size_t max_dirty, bool writethrough_until_flush)
   : m_image_ctx(image_ctx), m_init_max_dirty(max_dirty), m_max_dirty(max_dirty),
-    m_lock(util::unique_lock_name(
-      "librbd::cache::WriteAroundObjectDispatch::lock", this)) {
+    m_lock(ceph::make_mutex(util::unique_lock_name(
+      "librbd::cache::WriteAroundObjectDispatch::lock", this))) {
   if (writethrough_until_flush) {
     m_max_dirty = 0;
   }
@@ -132,7 +132,7 @@ bool WriteAroundObjectDispatch<I>::flush(
   auto cct = m_image_ctx->cct;
   ldout(cct, 20) << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   if (flush_source == io::FLUSH_SOURCE_USER && !m_user_flushed) {
     m_user_flushed = true;
     if (m_max_dirty == 0 && m_init_max_dirty > 0) {
@@ -173,12 +173,12 @@ bool WriteAroundObjectDispatch<I>::dispatch_unoptimized_io(
     io::DispatchResult* dispatch_result, Context* on_dispatched) {
   auto cct = m_image_ctx->cct;
 
-  m_lock.Lock();
+  m_lock.lock();
   auto in_flight_extents_it = m_in_flight_extents.find(object_no);
   if (in_flight_extents_it == m_in_flight_extents.end() ||
       !in_flight_extents_it->second.intersects(object_off, object_len)) {
     // no IO in-flight to the specified extent
-    m_lock.Unlock();
+    m_lock.unlock();
     return false;
   }
 
@@ -189,7 +189,7 @@ bool WriteAroundObjectDispatch<I>::dispatch_unoptimized_io(
   *dispatch_result = io::DISPATCH_RESULT_CONTINUE;
   m_blocked_unoptimized_ios[object_no].emplace(
     tid, BlockedIO{object_off, object_len, nullptr, on_dispatched});
-  m_lock.Unlock();
+  m_lock.unlock();
 
   return true;
 }
@@ -201,16 +201,16 @@ bool WriteAroundObjectDispatch<I>::dispatch_io(
     Context* on_dispatched) {
   auto cct = m_image_ctx->cct;
 
-  m_lock.Lock();
+  m_lock.lock();
   if (m_max_dirty == 0) {
     // write-through mode is active -- no-op the cache
-    m_lock.Unlock();
+    m_lock.unlock();
     return false;
   }
 
   if ((op_flags & LIBRADOS_OP_FLAG_FADVISE_FUA) != 0) {
     // force unit access flag is set -- disable write-around
-    m_lock.Unlock();
+    m_lock.unlock();
     return dispatch_unoptimized_io(object_no, object_off, object_len,
                                    dispatch_result, on_dispatched);
   }
@@ -231,9 +231,9 @@ bool WriteAroundObjectDispatch<I>::dispatch_io(
     m_queued_or_blocked_io_tids.insert(tid);
     m_blocked_ios[object_no].emplace(tid, BlockedIO{object_off, object_len, ctx,
                                                     on_dispatched});
-    m_lock.Unlock();
+    m_lock.unlock();
   } else if (can_dispatch_io(tid, object_len)) {
-    m_lock.Unlock();
+    m_lock.unlock();
 
     ldout(cct, 20) << "dispatching: tid=" << tid << dendl;
     on_dispatched->complete(0);
@@ -242,7 +242,7 @@ bool WriteAroundObjectDispatch<I>::dispatch_io(
     ldout(cct, 20) << "queueing: tid=" << tid << dendl;
     m_queued_or_blocked_io_tids.insert(tid);
     m_queued_ios.emplace(tid, QueuedIO{object_len, ctx, on_dispatched});
-    m_lock.Unlock();
+    m_lock.unlock();
   }
   return true;
 }
@@ -264,7 +264,7 @@ void WriteAroundObjectDispatch<I>::unblock_overlapping_ios(
     uint64_t object_no, uint64_t object_off, uint64_t object_len,
     Contexts* unoptimized_io_dispatches) {
   auto cct = m_image_ctx->cct;
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   auto in_flight_extents_it = m_in_flight_extents.find(object_no);
   ceph_assert(in_flight_extents_it != m_in_flight_extents.end());
@@ -337,7 +337,7 @@ void WriteAroundObjectDispatch<I>::unblock_overlapping_ios(
 template <typename I>
 bool WriteAroundObjectDispatch<I>::can_dispatch_io(
     uint64_t tid, uint64_t length) {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   if (m_in_flight_bytes == 0 || m_in_flight_bytes + length <= m_max_dirty) {
     // no in-flight IO or still under max write-around in-flight limit.
@@ -358,7 +358,7 @@ void WriteAroundObjectDispatch<I>::handle_in_flight_io_complete(
   auto cct = m_image_ctx->cct;
   ldout(cct, 20) << "r=" << r << ", tid=" << tid << dendl;
 
-  m_lock.Lock();
+  m_lock.lock();
   m_in_flight_io_tids.erase(tid);
   ceph_assert(m_in_flight_bytes >= object_len);
   m_in_flight_bytes -= object_len;
@@ -388,7 +388,7 @@ void WriteAroundObjectDispatch<I>::handle_in_flight_io_complete(
 
   // collect any queued flushes that were tied to queued IOs
   auto ready_flushes = collect_ready_flushes();
-  m_lock.Unlock();
+  m_lock.unlock();
 
   // dispatch any ready unoptimized IOs
   for (auto& it : unoptimized_io_dispatches) {
@@ -424,7 +424,7 @@ void WriteAroundObjectDispatch<I>::handle_in_flight_flush_complete(
   auto cct = m_image_ctx->cct;
   ldout(cct, 20) << "r=" << r << ", tid=" << tid << dendl;
 
-  m_lock.Lock();
+  m_lock.lock();
 
   // move the in-flight flush to the pending completion list
   auto it = m_in_flight_flushes.find(tid);
@@ -439,7 +439,7 @@ void WriteAroundObjectDispatch<I>::handle_in_flight_flush_complete(
   if (!finished_flushes.empty()) {
     std::swap(pending_flush_error, m_pending_flush_error);
   }
-  m_lock.Unlock();
+  m_lock.unlock();
 
   // complete flushes that were waiting on in-flight IO
   // (and propogate any IO errors)
@@ -453,7 +453,7 @@ void WriteAroundObjectDispatch<I>::handle_in_flight_flush_complete(
 template <typename I>
 typename WriteAroundObjectDispatch<I>::QueuedIOs
 WriteAroundObjectDispatch<I>::collect_ready_ios() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   QueuedIOs queued_ios;
 
@@ -474,7 +474,7 @@ WriteAroundObjectDispatch<I>::collect_ready_ios() {
 template <typename I>
 typename WriteAroundObjectDispatch<I>::Contexts
 WriteAroundObjectDispatch<I>::collect_ready_flushes() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   Contexts ready_flushes;
   auto io_tid_it = m_queued_or_blocked_io_tids.begin();
@@ -497,7 +497,7 @@ WriteAroundObjectDispatch<I>::collect_ready_flushes() {
 template <typename I>
 typename WriteAroundObjectDispatch<I>::Contexts
 WriteAroundObjectDispatch<I>::collect_finished_flushes() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   Contexts finished_flushes;
   auto io_tid_it = m_in_flight_io_tids.begin();
index 28721ce1172ea7d783e0e111ff5536010e4d60f9..ce217d2cf45228b26e5f8669f2e1eaf6fb0a2229 100644 (file)
@@ -6,7 +6,7 @@
 
 #include "librbd/io/ObjectDispatchInterface.h"
 #include "include/interval_set.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "librbd/io/Types.h"
 #include <map>
 #include <set>
@@ -142,7 +142,7 @@ private:
   size_t m_init_max_dirty;
   size_t m_max_dirty;
 
-  Mutex m_lock;
+  ceph::mutex m_lock;
   bool m_user_flushed = false;
 
   uint64_t m_last_tid = 0;
index 32a899b3c9324779b13422f45893bc958f5e1cb8..705ddcb5a4b45e58a4461e98a7e43f11bce7f116 100644 (file)
@@ -35,7 +35,7 @@ ImageCopyRequest<I>::ImageCopyRequest(I *src_image_ctx, I *dst_image_ctx,
     m_snap_id_end(snap_id_end), m_flatten(flatten),
     m_object_number(object_number), m_snap_seqs(snap_seqs),
     m_prog_ctx(prog_ctx), m_on_finish(on_finish), m_cct(dst_image_ctx->cct),
-    m_lock(unique_lock_name("ImageCopyRequest::m_lock", this)) {
+    m_lock(ceph::make_mutex(unique_lock_name("ImageCopyRequest::m_lock", this))) {
 }
 
 template <typename I>
@@ -53,7 +53,7 @@ void ImageCopyRequest<I>::send() {
 
 template <typename I>
 void ImageCopyRequest<I>::cancel() {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
 
   ldout(m_cct, 20) << dendl;
   m_canceled = true;
@@ -68,7 +68,7 @@ void ImageCopyRequest<I>::send_object_copies() {
 
   uint64_t size;
   {
-    RWLock::RLocker image_locker(m_src_image_ctx->image_lock);
+    std::shared_lock image_locker{m_src_image_ctx->image_lock};
     size =  m_src_image_ctx->get_image_size(CEPH_NOSNAP);
     for (auto snap_id : m_src_image_ctx->snaps) {
       size = std::max(size, m_src_image_ctx->get_image_size(snap_id));
@@ -81,7 +81,7 @@ void ImageCopyRequest<I>::send_object_copies() {
 
   bool complete;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     for (uint64_t i = 0;
          i < m_src_image_ctx->config.template get_val<uint64_t>("rbd_concurrent_management_ops");
          ++i) {
@@ -100,7 +100,7 @@ void ImageCopyRequest<I>::send_object_copies() {
 
 template <typename I>
 void ImageCopyRequest<I>::send_next_object_copy() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   if (m_canceled && m_ret_val == 0) {
     ldout(m_cct, 10) << "image copy canceled" << dendl;
@@ -132,7 +132,7 @@ void ImageCopyRequest<I>::handle_object_copy(uint64_t object_no, int r) {
 
   bool complete;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ceph_assert(m_current_ops > 0);
     --m_current_ops;
 
@@ -150,9 +150,9 @@ void ImageCopyRequest<I>::handle_object_copy(uint64_t object_no, int r) {
         m_copied_objects.pop();
         uint64_t progress_object_no = *m_object_number + 1;
         m_updating_progress = true;
-        m_lock.Unlock();
+        m_lock.unlock();
         m_prog_ctx->update_progress(progress_object_no, m_end_object_no);
-        m_lock.Lock();
+        m_lock.lock();
         ceph_assert(m_updating_progress);
         m_updating_progress = false;
       }
index e50507e884859c28b4bfb26aa44316f012e71ae1..15c3cd2951d22930a7cef311a61ef0aafe85f295 100644 (file)
@@ -6,7 +6,7 @@
 
 #include "include/int_types.h"
 #include "include/rados/librados.hpp"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "common/RefCountedObj.h"
 #include "librbd/Types.h"
 #include "librbd/deep_copy/Types.h"
@@ -77,7 +77,7 @@ private:
   Context *m_on_finish;
 
   CephContext *m_cct;
-  Mutex m_lock;
+  ceph::mutex m_lock;
   bool m_canceled = false;
 
   uint64_t m_object_no = 0;
index 3a1de0e1c75b4918bbb8226be097bac6e90a9dbc..15c647975f30c7aa3a0b6a2c2df913eb65cedd90 100644 (file)
@@ -224,10 +224,10 @@ void ObjectCopyRequest<I>::handle_read_object(int r) {
 
 template <typename I>
 void ObjectCopyRequest<I>::send_read_from_parent() {
-  m_src_image_ctx->image_lock.get_read();
+  m_src_image_ctx->image_lock.lock_shared();
   io::Extents image_extents;
   compute_read_from_parent_ops(&image_extents);
-  m_src_image_ctx->image_lock.put_read();
+  m_src_image_ctx->image_lock.unlock_shared();
 
   if (image_extents.empty()) {
     handle_read_from_parent(0);
@@ -380,7 +380,7 @@ void ObjectCopyRequest<I>::send_write_object() {
   int r;
   Context *finish_op_ctx;
   {
-    RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+    std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
     finish_op_ctx = start_lock_op(m_dst_image_ctx->owner_lock, &r);
   }
   if (finish_op_ctx == nullptr) {
@@ -434,14 +434,14 @@ void ObjectCopyRequest<I>::send_update_object_map() {
     return;
   }
 
-  m_dst_image_ctx->owner_lock.get_read();
-  m_dst_image_ctx->image_lock.get_read();
+  m_dst_image_ctx->owner_lock.lock_shared();
+  m_dst_image_ctx->image_lock.lock_shared();
   if (m_dst_image_ctx->object_map == nullptr) {
     // possible that exclusive lock was lost in background
     lderr(m_cct) << "object map is not initialized" << dendl;
 
-    m_dst_image_ctx->image_lock.put_read();
-    m_dst_image_ctx->owner_lock.put_read();
+    m_dst_image_ctx->image_lock.unlock_shared();
+    m_dst_image_ctx->owner_lock.unlock_shared();
     finish(-EINVAL);
     return;
   }
@@ -460,8 +460,8 @@ void ObjectCopyRequest<I>::send_update_object_map() {
   auto finish_op_ctx = start_lock_op(m_dst_image_ctx->owner_lock, &r);
   if (finish_op_ctx == nullptr) {
     lderr(m_cct) << "lost exclusive lock" << dendl;
-    m_dst_image_ctx->image_lock.put_read();
-    m_dst_image_ctx->owner_lock.put_read();
+    m_dst_image_ctx->image_lock.unlock_shared();
+    m_dst_image_ctx->owner_lock.unlock_shared();
     finish(r);
     return;
   }
@@ -477,8 +477,8 @@ void ObjectCopyRequest<I>::send_update_object_map() {
                                  {}, {}, false, ctx);
 
   // NOTE: state machine might complete before we reach here
-  dst_image_ctx->image_lock.put_read();
-  dst_image_ctx->owner_lock.put_read();
+  dst_image_ctx->image_lock.unlock_shared();
+  dst_image_ctx->owner_lock.unlock_shared();
   if (!sent) {
     ceph_assert(dst_snap_id == CEPH_NOSNAP);
     ctx->complete(0);
@@ -503,8 +503,9 @@ void ObjectCopyRequest<I>::handle_update_object_map(int r) {
 }
 
 template <typename I>
-Context *ObjectCopyRequest<I>::start_lock_op(RWLock &owner_lock, int* r) {
-  ceph_assert(m_dst_image_ctx->owner_lock.is_locked());
+Context *ObjectCopyRequest<I>::start_lock_op(ceph::shared_mutex &owner_lock,
+                                            int* r) {
+  ceph_assert(ceph_mutex_is_locked(m_dst_image_ctx->owner_lock));
   if (m_dst_image_ctx->exclusive_lock == nullptr) {
     return new FunctionContext([](int r) {});
   }
@@ -573,9 +574,9 @@ void ObjectCopyRequest<I>::compute_read_ops() {
   m_read_snaps = {};
   m_zero_interval = {};
 
-  m_src_image_ctx->image_lock.get_read();
+  m_src_image_ctx->image_lock.lock_shared();
   bool hide_parent = (m_src_image_ctx->parent != nullptr);
-  m_src_image_ctx->image_lock.put_read();
+  m_src_image_ctx->image_lock.unlock_shared();
 
   librados::snap_t src_copy_point_snap_id = m_snap_map.rbegin()->first;
   bool prev_exists = hide_parent;
@@ -712,7 +713,7 @@ void ObjectCopyRequest<I>::compute_read_ops() {
 template <typename I>
 void ObjectCopyRequest<I>::compute_read_from_parent_ops(
     io::Extents *parent_image_extents) {
-  assert(m_src_image_ctx->image_lock.is_locked());
+  assert(ceph_mutex_is_locked(m_src_image_ctx->image_lock));
 
   m_read_ops = {};
   m_zero_interval = {};
@@ -845,9 +846,9 @@ void ObjectCopyRequest<I>::compute_zero_ops() {
   bool fast_diff = m_dst_image_ctx->test_features(RBD_FEATURE_FAST_DIFF);
   uint64_t prev_end_size = 0;
 
-  m_src_image_ctx->image_lock.get_read();
+  m_src_image_ctx->image_lock.lock_shared();
   bool hide_parent = (m_src_image_ctx->parent != nullptr);
-  m_src_image_ctx->image_lock.put_read();
+  m_src_image_ctx->image_lock.unlock_shared();
 
   for (auto &it : m_dst_zero_interval) {
     auto src_snap_seq = it.first;
@@ -867,7 +868,7 @@ void ObjectCopyRequest<I>::compute_zero_ops() {
     }
 
     if (hide_parent) {
-      RWLock::RLocker image_locker(m_dst_image_ctx->image_lock);
+      std::shared_lock image_locker{m_dst_image_ctx->image_lock};
       uint64_t parent_overlap = 0;
       int r = m_dst_image_ctx->get_parent_overlap(dst_snap_seq,
                                                   &parent_overlap);
@@ -966,7 +967,7 @@ void ObjectCopyRequest<I>::finish(int r) {
 
 template <typename I>
 void ObjectCopyRequest<I>::compute_dst_object_may_exist() {
-  RWLock::RLocker image_locker(m_dst_image_ctx->image_lock);
+  std::shared_lock image_locker{m_dst_image_ctx->image_lock};
 
   auto snap_ids = m_dst_image_ctx->snaps;
   snap_ids.push_back(CEPH_NOSNAP);
index 6df98e695ad0e97f83e1296c26bf3b14b7f49e7a..a1dccf5d770a5a765a934d1fdc8110d59a0306e1 100644 (file)
@@ -181,7 +181,7 @@ private:
   void send_update_object_map();
   void handle_update_object_map(int r);
 
-  Context *start_lock_op(RWLock &owner_lock, int* r);
+  Context *start_lock_op(ceph::shared_mutex &owner_lock, int* r);
 
   uint64_t src_to_dst_object_offset(uint64_t objectno, uint64_t offset);
 
index 654ac33071dce353768be0e17f6b35fdd79b752f..a6c43dfedd6fe79a8daa1a8ef6c01df644170142 100644 (file)
@@ -39,13 +39,13 @@ void SetHeadRequest<I>::send() {
 
 template <typename I>
 void SetHeadRequest<I>::send_set_size() {
-  m_image_ctx->image_lock.get_read();
+  m_image_ctx->image_lock.lock_shared();
   if (m_image_ctx->size == m_size) {
-    m_image_ctx->image_lock.put_read();
+    m_image_ctx->image_lock.unlock_shared();
     send_detach_parent();
     return;
   }
-  m_image_ctx->image_lock.put_read();
+  m_image_ctx->image_lock.unlock_shared();
 
   ldout(m_cct, 20) << dendl;
 
@@ -87,7 +87,7 @@ void SetHeadRequest<I>::handle_set_size(int r) {
 
   {
     // adjust in-memory image size now that it's updated on disk
-    RWLock::WLocker image_locker(m_image_ctx->image_lock);
+    std::unique_lock image_locker{m_image_ctx->image_lock};
     if (m_image_ctx->size > m_size) {
       if (m_image_ctx->parent_md.spec.pool_id != -1 &&
           m_image_ctx->parent_md.overlap > m_size) {
@@ -102,15 +102,15 @@ void SetHeadRequest<I>::handle_set_size(int r) {
 
 template <typename I>
 void SetHeadRequest<I>::send_detach_parent() {
-  m_image_ctx->image_lock.get_read();
+  m_image_ctx->image_lock.lock_shared();
   if (m_image_ctx->parent_md.spec.pool_id == -1 ||
       (m_image_ctx->parent_md.spec == m_parent_spec &&
        m_image_ctx->parent_md.overlap == m_parent_overlap)) {
-    m_image_ctx->image_lock.put_read();
+    m_image_ctx->image_lock.unlock_shared();
     send_attach_parent();
     return;
   }
-  m_image_ctx->image_lock.put_read();
+  m_image_ctx->image_lock.unlock_shared();
 
   ldout(m_cct, 20) << dendl;
 
@@ -142,7 +142,7 @@ void SetHeadRequest<I>::handle_detach_parent(int r) {
 
   {
     // adjust in-memory parent now that it's updated on disk
-    RWLock::WLocker image_locker(m_image_ctx->image_lock);
+    std::unique_lock image_locker{m_image_ctx->image_lock};
     m_image_ctx->parent_md.spec = {};
     m_image_ctx->parent_md.overlap = 0;
   }
@@ -152,14 +152,14 @@ void SetHeadRequest<I>::handle_detach_parent(int r) {
 
 template <typename I>
 void SetHeadRequest<I>::send_attach_parent() {
-  m_image_ctx->image_lock.get_read();
+  m_image_ctx->image_lock.lock_shared();
   if (m_image_ctx->parent_md.spec == m_parent_spec &&
       m_image_ctx->parent_md.overlap == m_parent_overlap) {
-    m_image_ctx->image_lock.put_read();
+    m_image_ctx->image_lock.unlock_shared();
     finish(0);
     return;
   }
-  m_image_ctx->image_lock.put_read();
+  m_image_ctx->image_lock.unlock_shared();
 
   ldout(m_cct, 20) << dendl;
 
@@ -192,7 +192,7 @@ void SetHeadRequest<I>::handle_attach_parent(int r) {
 
   {
     // adjust in-memory parent now that it's updated on disk
-    RWLock::WLocker image_locker(m_image_ctx->image_lock);
+    std::unique_lock image_locker{m_image_ctx->image_lock};
     m_image_ctx->parent_md.spec = m_parent_spec;
     m_image_ctx->parent_md.overlap = m_parent_overlap;
   }
@@ -202,7 +202,7 @@ void SetHeadRequest<I>::handle_attach_parent(int r) {
 
 template <typename I>
 Context *SetHeadRequest<I>::start_lock_op(int* r) {
-  RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
+  std::shared_lock owner_locker{m_image_ctx->owner_lock};
   if (m_image_ctx->exclusive_lock == nullptr) {
     return new FunctionContext([](int r) {});
   }
index de57ddb144b163221d7f2e03d5b16e11ac66683b..e53386b7148e97a5e956bbd62c9657a524f3662f 100644 (file)
@@ -53,7 +53,7 @@ SnapshotCopyRequest<I>::SnapshotCopyRequest(I *src_image_ctx,
     m_dst_image_ctx(dst_image_ctx), m_snap_id_end(snap_id_end),
     m_flatten(flatten), m_work_queue(work_queue), m_snap_seqs_result(snap_seqs),
     m_snap_seqs(*snap_seqs), m_on_finish(on_finish), m_cct(dst_image_ctx->cct),
-    m_lock(unique_lock_name("SnapshotCopyRequest::m_lock", this)) {
+    m_lock(ceph::make_mutex(unique_lock_name("SnapshotCopyRequest::m_lock", this))) {
   // snap ids ordered from oldest to newest
   m_src_snap_ids.insert(src_image_ctx->snaps.begin(),
                         src_image_ctx->snaps.end());
@@ -87,7 +87,7 @@ void SnapshotCopyRequest<I>::send() {
 
 template <typename I>
 void SnapshotCopyRequest<I>::cancel() {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
 
   ldout(m_cct, 20) << dendl;
   m_canceled = true;
@@ -104,18 +104,18 @@ void SnapshotCopyRequest<I>::send_snap_unprotect() {
   for (; snap_id_it != m_dst_snap_ids.end(); ++snap_id_it) {
     librados::snap_t dst_snap_id = *snap_id_it;
 
-    m_dst_image_ctx->image_lock.get_read();
+    m_dst_image_ctx->image_lock.lock_shared();
 
     bool dst_unprotected;
     int r = m_dst_image_ctx->is_snap_unprotected(dst_snap_id, &dst_unprotected);
     if (r < 0) {
       lderr(m_cct) << "failed to retrieve destination snap unprotect status: "
            << cpp_strerror(r) << dendl;
-      m_dst_image_ctx->image_lock.put_read();
+      m_dst_image_ctx->image_lock.unlock_shared();
       finish(r);
       return;
     }
-    m_dst_image_ctx->image_lock.put_read();
+    m_dst_image_ctx->image_lock.unlock_shared();
 
     if (dst_unprotected) {
       // snap is already unprotected -- check next snap
@@ -131,7 +131,7 @@ void SnapshotCopyRequest<I>::send_snap_unprotect() {
       });
 
     if (snap_seq_it != m_snap_seqs.end()) {
-      m_src_image_ctx->image_lock.get_read();
+      m_src_image_ctx->image_lock.lock_shared();
       bool src_unprotected;
       r = m_src_image_ctx->is_snap_unprotected(snap_seq_it->first,
                                                &src_unprotected);
@@ -145,11 +145,11 @@ void SnapshotCopyRequest<I>::send_snap_unprotect() {
       if (r < 0) {
         lderr(m_cct) << "failed to retrieve source snap unprotect status: "
                      << cpp_strerror(r) << dendl;
-        m_src_image_ctx->image_lock.put_read();
+        m_src_image_ctx->image_lock.unlock_shared();
         finish(r);
         return;
       }
-      m_src_image_ctx->image_lock.put_read();
+      m_src_image_ctx->image_lock.unlock_shared();
 
       if (src_unprotected) {
         // source is unprotected -- unprotect destination snap
@@ -186,7 +186,7 @@ void SnapshotCopyRequest<I>::send_snap_unprotect() {
       handle_snap_unprotect(r);
       finish_op_ctx->complete(0);
     });
-  RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+  std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
   m_dst_image_ctx->operations->execute_snap_unprotect(
     cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx);
 }
@@ -204,7 +204,7 @@ void SnapshotCopyRequest<I>::handle_snap_unprotect(int r) {
 
   {
     // avoid the need to refresh to delete the newly unprotected snapshot
-    RWLock::RLocker image_locker(m_dst_image_ctx->image_lock);
+    std::shared_lock image_locker{m_dst_image_ctx->image_lock};
     auto snap_info_it = m_dst_image_ctx->snap_info.find(m_prev_snap_id);
     if (snap_info_it != m_dst_image_ctx->snap_info.end()) {
       snap_info_it->second.protection_status =
@@ -230,9 +230,9 @@ void SnapshotCopyRequest<I>::send_snap_remove() {
     librados::snap_t dst_snap_id = *snap_id_it;
 
     cls::rbd::SnapshotNamespace snap_namespace;
-    m_dst_image_ctx->image_lock.get_read();
+    m_dst_image_ctx->image_lock.lock_shared();
     int r = m_dst_image_ctx->get_snap_namespace(dst_snap_id, &snap_namespace);
-    m_dst_image_ctx->image_lock.put_read();
+    m_dst_image_ctx->image_lock.unlock_shared();
     if (r < 0) {
       lderr(m_cct) << "failed to retrieve destination snap namespace: "
                    << m_snap_name << dendl;
@@ -283,7 +283,7 @@ void SnapshotCopyRequest<I>::send_snap_remove() {
       handle_snap_remove(r);
       finish_op_ctx->complete(0);
     });
-  RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+  std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
   m_dst_image_ctx->operations->execute_snap_remove(
     cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx);
 }
@@ -316,9 +316,9 @@ void SnapshotCopyRequest<I>::send_snap_create() {
     librados::snap_t src_snap_id = *snap_id_it;
 
     cls::rbd::SnapshotNamespace snap_namespace;
-    m_src_image_ctx->image_lock.get_read();
+    m_src_image_ctx->image_lock.lock_shared();
     int r = m_src_image_ctx->get_snap_namespace(src_snap_id, &snap_namespace);
-    m_src_image_ctx->image_lock.put_read();
+    m_src_image_ctx->image_lock.unlock_shared();
     if (r < 0) {
       lderr(m_cct) << "failed to retrieve source snap namespace: "
                    << m_snap_name << dendl;
@@ -343,10 +343,10 @@ void SnapshotCopyRequest<I>::send_snap_create() {
   m_prev_snap_id = *snap_id_it;
   m_snap_name = get_snapshot_name(m_src_image_ctx, m_prev_snap_id);
 
-  m_src_image_ctx->image_lock.get_read();
+  m_src_image_ctx->image_lock.lock_shared();
   auto snap_info_it = m_src_image_ctx->snap_info.find(m_prev_snap_id);
   if (snap_info_it == m_src_image_ctx->snap_info.end()) {
-    m_src_image_ctx->image_lock.put_read();
+    m_src_image_ctx->image_lock.unlock_shared();
     lderr(m_cct) << "failed to retrieve source snap info: " << m_snap_name
                  << dendl;
     finish(-ENOENT);
@@ -361,7 +361,7 @@ void SnapshotCopyRequest<I>::send_snap_create() {
     parent_spec = m_dst_parent_spec;
     parent_overlap = snap_info_it->second.parent.overlap;
   }
-  m_src_image_ctx->image_lock.put_read();
+  m_src_image_ctx->image_lock.unlock_shared();
 
   ldout(m_cct, 20) << "snap_name=" << m_snap_name << ", "
                    << "snap_id=" << m_prev_snap_id << ", "
@@ -428,18 +428,18 @@ void SnapshotCopyRequest<I>::send_snap_protect() {
   for (; snap_id_it != m_src_snap_ids.end(); ++snap_id_it) {
     librados::snap_t src_snap_id = *snap_id_it;
 
-    m_src_image_ctx->image_lock.get_read();
+    m_src_image_ctx->image_lock.lock_shared();
 
     bool src_protected;
     int r = m_src_image_ctx->is_snap_protected(src_snap_id, &src_protected);
     if (r < 0) {
       lderr(m_cct) << "failed to retrieve source snap protect status: "
                    << cpp_strerror(r) << dendl;
-      m_src_image_ctx->image_lock.put_read();
+      m_src_image_ctx->image_lock.unlock_shared();
       finish(r);
       return;
     }
-    m_src_image_ctx->image_lock.put_read();
+    m_src_image_ctx->image_lock.unlock_shared();
 
     if (!src_protected) {
       // snap is not protected -- check next snap
@@ -450,17 +450,17 @@ void SnapshotCopyRequest<I>::send_snap_protect() {
     auto snap_seq_it = m_snap_seqs.find(src_snap_id);
     ceph_assert(snap_seq_it != m_snap_seqs.end());
 
-    m_dst_image_ctx->image_lock.get_read();
+    m_dst_image_ctx->image_lock.lock_shared();
     bool dst_protected;
     r = m_dst_image_ctx->is_snap_protected(snap_seq_it->second, &dst_protected);
     if (r < 0) {
       lderr(m_cct) << "failed to retrieve destination snap protect status: "
                    << cpp_strerror(r) << dendl;
-      m_dst_image_ctx->image_lock.put_read();
+      m_dst_image_ctx->image_lock.unlock_shared();
       finish(r);
       return;
     }
-    m_dst_image_ctx->image_lock.put_read();
+    m_dst_image_ctx->image_lock.unlock_shared();
 
     if (!dst_protected) {
       break;
@@ -492,7 +492,7 @@ void SnapshotCopyRequest<I>::send_snap_protect() {
       handle_snap_protect(r);
       finish_op_ctx->complete(0);
     });
-  RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+  std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
   m_dst_image_ctx->operations->execute_snap_protect(
     cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx);
 }
@@ -527,7 +527,7 @@ void SnapshotCopyRequest<I>::send_set_head() {
   cls::rbd::ParentImageSpec parent_spec;
   uint64_t parent_overlap = 0;
   {
-    RWLock::RLocker src_locker(m_src_image_ctx->image_lock);
+    std::shared_lock src_locker{m_src_image_ctx->image_lock};
     size = m_src_image_ctx->size;
     if (!m_flatten) {
       parent_spec = m_src_image_ctx->parent_md.spec;
@@ -565,8 +565,8 @@ void SnapshotCopyRequest<I>::send_resize_object_map() {
 
   if (m_snap_id_end == CEPH_NOSNAP &&
       m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP)) {
-    RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
-    RWLock::RLocker image_locker(m_dst_image_ctx->image_lock);
+    std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
+    std::shared_lock image_locker{m_dst_image_ctx->image_lock};
 
     if (m_dst_image_ctx->object_map != nullptr &&
         Striper::get_num_objects(m_dst_image_ctx->layout,
@@ -611,7 +611,7 @@ void SnapshotCopyRequest<I>::handle_resize_object_map(int r) {
 template <typename I>
 bool SnapshotCopyRequest<I>::handle_cancellation() {
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     if (!m_canceled) {
       return false;
     }
@@ -631,8 +631,8 @@ void SnapshotCopyRequest<I>::error(int r) {
 template <typename I>
 int SnapshotCopyRequest<I>::validate_parent(I *image_ctx,
                                             cls::rbd::ParentImageSpec *spec) {
-  RWLock::RLocker owner_locker(image_ctx->owner_lock);
-  RWLock::RLocker image_locker(image_ctx->image_lock);
+  std::shared_lock owner_locker{image_ctx->owner_lock};
+  std::shared_lock image_locker{image_ctx->image_lock};
 
   // ensure source image's parent specs are still consistent
   *spec = image_ctx->parent_md.spec;
@@ -654,13 +654,13 @@ int SnapshotCopyRequest<I>::validate_parent(I *image_ctx,
 
 template <typename I>
 Context *SnapshotCopyRequest<I>::start_lock_op(int* r) {
-  RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+  std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
   return start_lock_op(m_dst_image_ctx->owner_lock, r);
 }
 
 template <typename I>
-Context *SnapshotCopyRequest<I>::start_lock_op(RWLock &owner_lock, int* r) {
-  ceph_assert(m_dst_image_ctx->owner_lock.is_locked());
+Context *SnapshotCopyRequest<I>::start_lock_op(ceph::shared_mutex &owner_lock, int* r) {
+  ceph_assert(ceph_mutex_is_locked(m_dst_image_ctx->owner_lock));
   if (m_dst_image_ctx->exclusive_lock == nullptr) {
     return new FunctionContext([](int r) {});
   }
index 1e4f3badba0ca8e9da8c55abbac8967223c34e3c..ede5a76758feebe21007042c7d4999e95a89a28d 100644 (file)
@@ -99,7 +99,7 @@ private:
 
   cls::rbd::ParentImageSpec m_dst_parent_spec;
 
-  Mutex m_lock;
+  ceph::mutex m_lock;
   bool m_canceled = false;
 
   void send_snap_unprotect();
@@ -127,7 +127,7 @@ private:
   int validate_parent(ImageCtxT *image_ctx, cls::rbd::ParentImageSpec *spec);
 
   Context *start_lock_op(int* r);
-  Context *start_lock_op(RWLock &owner_locki, int* r);
+  Context *start_lock_op(ceph::shared_mutex &owner_locki, int* r);
 
   void finish(int r);
 };
index 2d5fc58d8d6048ac01e102a32afa0b7df8100b6d..5f4ae5121ddf619974de1dbfa41e37621afdd38a 100644 (file)
@@ -80,7 +80,7 @@ void SnapshotCreateRequest<I>::send_create_snap() {
       handle_create_snap(r);
       finish_op_ctx->complete(0);
     });
-  RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+  std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
   m_dst_image_ctx->operations->execute_snap_create(m_snap_namespace,
                                                    m_snap_name.c_str(),
                                                    ctx,
@@ -108,17 +108,17 @@ void SnapshotCreateRequest<I>::send_create_object_map() {
     return;
   }
 
-  m_dst_image_ctx->image_lock.get_read();
+  m_dst_image_ctx->image_lock.lock_shared();
   auto snap_it = m_dst_image_ctx->snap_ids.find(
     {cls::rbd::UserSnapshotNamespace(), m_snap_name});
   if (snap_it == m_dst_image_ctx->snap_ids.end()) {
     lderr(m_cct) << "failed to locate snap: " << m_snap_name << dendl;
-    m_dst_image_ctx->image_lock.put_read();
+    m_dst_image_ctx->image_lock.unlock_shared();
     finish(-ENOENT);
     return;
   }
   librados::snap_t local_snap_id = snap_it->second;
-  m_dst_image_ctx->image_lock.put_read();
+  m_dst_image_ctx->image_lock.unlock_shared();
 
   std::string object_map_oid(librbd::ObjectMap<>::object_map_name(
     m_dst_image_ctx->id, local_snap_id));
@@ -166,7 +166,7 @@ void SnapshotCreateRequest<I>::handle_create_object_map(int r) {
 
 template <typename I>
 Context *SnapshotCreateRequest<I>::start_lock_op(int* r) {
-  RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+  std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
   if (m_dst_image_ctx->exclusive_lock == nullptr) {
     return new FunctionContext([](int r) {});
   }
index 4d5f48b1527d963d1fd9279a5e4efa7efb730cff..bfaddc1b2ba3eadd469d97278a143cf30e9f4c5b 100644 (file)
@@ -13,7 +13,7 @@ namespace librbd {
 namespace exclusive_lock {
 
 int AutomaticPolicy::lock_requested(bool force) {
-  ceph_assert(m_image_ctx->owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock));
   ceph_assert(m_image_ctx->exclusive_lock != nullptr);
 
   ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force
index 3d20d4b2aa0207ac53eef6823c81c95e1c897c7c..7b41fab8e36fa6e5b13865ab5abce42e7968e150 100644 (file)
@@ -108,7 +108,7 @@ void PostAcquireRequest<I>::send_open_journal() {
 
   bool journal_enabled;
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     journal_enabled = (m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
                                                  m_image_ctx.image_lock) &&
                        !m_image_ctx.get_journal_policy()->journal_disabled());
@@ -153,7 +153,7 @@ void PostAcquireRequest<I>::send_allocate_journal_tag() {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << dendl;
 
-  RWLock::RLocker image_locker(m_image_ctx.image_lock);
+  std::shared_lock image_locker{m_image_ctx.image_lock};
   using klass = PostAcquireRequest<I>;
   Context *ctx = create_context_callback<
     klass, &klass::handle_allocate_journal_tag>(this);
@@ -272,7 +272,7 @@ void PostAcquireRequest<I>::handle_close_object_map(int r) {
 template <typename I>
 void PostAcquireRequest<I>::apply() {
   {
-    RWLock::WLocker image_locker(m_image_ctx.image_lock);
+    std::unique_lock image_locker{m_image_ctx.image_lock};
     ceph_assert(m_image_ctx.object_map == nullptr);
     m_image_ctx.object_map = m_object_map;
 
@@ -286,7 +286,7 @@ void PostAcquireRequest<I>::apply() {
 
 template <typename I>
 void PostAcquireRequest<I>::revert() {
-  RWLock::WLocker image_locker(m_image_ctx.image_lock);
+  std::unique_lock image_locker{m_image_ctx.image_lock};
   m_image_ctx.object_map = nullptr;
   m_image_ctx.journal = nullptr;
 
index 346c4f39e0d365fd12f6c7cfca67cc1814b94871..6632550f7e29a22934b7b48678cbfea039690551 100644 (file)
@@ -109,7 +109,7 @@ void PreReleaseRequest<I>::send_block_writes() {
     klass, &klass::handle_block_writes>(this);
 
   {
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+    std::shared_lock owner_locker{m_image_ctx.owner_lock};
     // setting the lock as required will automatically cause the IO
     // queue to re-request the lock if any IO is queued
     if (m_image_ctx.clone_copy_on_read ||
@@ -165,7 +165,7 @@ void PreReleaseRequest<I>::send_invalidate_cache() {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << dendl;
 
-  RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+  std::shared_lock owner_lock{m_image_ctx.owner_lock};
   Context *ctx = create_context_callback<
       PreReleaseRequest<I>,
       &PreReleaseRequest<I>::handle_invalidate_cache>(this);
@@ -212,7 +212,7 @@ void PreReleaseRequest<I>::handle_flush_notifies(int r) {
 template <typename I>
 void PreReleaseRequest<I>::send_close_journal() {
   {
-    RWLock::WLocker image_locker(m_image_ctx.image_lock);
+    std::unique_lock image_locker{m_image_ctx.image_lock};
     std::swap(m_journal, m_image_ctx.journal);
   }
 
@@ -248,7 +248,7 @@ void PreReleaseRequest<I>::handle_close_journal(int r) {
 template <typename I>
 void PreReleaseRequest<I>::send_close_object_map() {
   {
-    RWLock::WLocker image_locker(m_image_ctx.image_lock);
+    std::unique_lock image_locker{m_image_ctx.image_lock};
     std::swap(m_object_map, m_image_ctx.object_map);
   }
 
index 6bdb313b3696d16d1ffe5d90612038e3d644d5ae..227c40815f95b1a7dcfe0cebb05f39ce8825855a 100644 (file)
@@ -13,7 +13,7 @@ namespace librbd {
 namespace exclusive_lock {
 
 int StandardPolicy::lock_requested(bool force) {
-  ceph_assert(m_image_ctx->owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock));
   ceph_assert(m_image_ctx->exclusive_lock != nullptr);
 
   ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force
index ac709aa0f3e7a6eb465702acdf5f44684d2b2b6f..d3920608d00c646a090750a364dd0d32b92c7cde 100644 (file)
@@ -93,7 +93,7 @@ void AttachChildRequest<I>::handle_v1_refresh(int r) {
 
   bool snap_protected = false;
   if (r == 0) {
-    RWLock::RLocker image_locker(m_parent_image_ctx->image_lock);
+    std::shared_lock image_locker{m_parent_image_ctx->image_lock};
     r = m_parent_image_ctx->is_snap_protected(m_parent_snap_id,
                                               &snap_protected);
   }
index d430680aed87c2a884b27dcb31868773701379a5..d5f2acd5fb132fee3de8073b31c51c235c579177 100644 (file)
@@ -189,13 +189,13 @@ void CloneRequest<I>::validate_parent() {
     return;
   }
 
-  m_parent_image_ctx->image_lock.get_read();
+  m_parent_image_ctx->image_lock.lock_shared();
   uint64_t p_features = m_parent_image_ctx->features;
   m_size = m_parent_image_ctx->get_image_size(m_parent_image_ctx->snap_id);
 
   bool snap_protected;
   int r = m_parent_image_ctx->is_snap_protected(m_parent_image_ctx->snap_id, &snap_protected);
-  m_parent_image_ctx->image_lock.put_read();
+  m_parent_image_ctx->image_lock.unlock_shared();
 
   if ((p_features & RBD_FEATURE_LAYERING) != RBD_FEATURE_LAYERING) {
     lderr(m_cct) << "parent image must support layering" << dendl;
@@ -276,7 +276,7 @@ void CloneRequest<I>::create_child() {
   Context *ctx = create_context_callback<
     klass, &klass::handle_create_child>(this);
 
-  RWLock::RLocker image_locker(m_parent_image_ctx->image_lock);
+  std::shared_lock image_locker{m_parent_image_ctx->image_lock};
   CreateRequest<I> *req = CreateRequest<I>::create(
     m_config, m_ioctx, m_name, m_id, m_size, m_opts,
     m_non_primary_global_image_id, m_primary_mirror_uuid, true,
index 33fa3bb6bb1fdcb117e6e2c90b99c82cbe299b6b..a92c96c4190c11ca526b0ae65ea3a69fe49a7726 100644 (file)
@@ -89,7 +89,7 @@ void CloseRequest<I>::send_shut_down_io_queue() {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << this << " " << __func__ << dendl;
 
-  RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
+  std::shared_lock owner_locker{m_image_ctx->owner_lock};
   m_image_ctx->io_work_queue->shut_down(create_context_callback<
     CloseRequest<I>, &CloseRequest<I>::handle_shut_down_io_queue>(this));
 }
@@ -105,11 +105,11 @@ void CloseRequest<I>::handle_shut_down_io_queue(int r) {
 template <typename I>
 void CloseRequest<I>::send_shut_down_exclusive_lock() {
   {
-    RWLock::WLocker owner_locker(m_image_ctx->owner_lock);
+    std::unique_lock owner_locker{m_image_ctx->owner_lock};
     m_exclusive_lock = m_image_ctx->exclusive_lock;
 
     // if reading a snapshot -- possible object map is open
-    RWLock::WLocker image_locker(m_image_ctx->image_lock);
+    std::unique_lock image_locker{m_image_ctx->image_lock};
     if (m_exclusive_lock == nullptr) {
       delete m_image_ctx->object_map;
       m_image_ctx->object_map = nullptr;
@@ -136,11 +136,11 @@ void CloseRequest<I>::handle_shut_down_exclusive_lock(int r) {
   ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
 
   {
-    RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
+    std::shared_lock owner_locker{m_image_ctx->owner_lock};
     ceph_assert(m_image_ctx->exclusive_lock == nullptr);
 
     // object map and journal closed during exclusive lock shutdown
-    RWLock::RLocker image_locker(m_image_ctx->image_lock);
+    std::shared_lock image_locker{m_image_ctx->image_lock};
     ceph_assert(m_image_ctx->journal == nullptr);
     ceph_assert(m_image_ctx->object_map == nullptr);
   }
@@ -162,7 +162,7 @@ void CloseRequest<I>::send_flush() {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << this << " " << __func__ << dendl;
 
-  RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
+  std::shared_lock owner_locker{m_image_ctx->owner_lock};
   auto ctx = create_context_callback<
     CloseRequest<I>, &CloseRequest<I>::handle_flush>(this);
   auto aio_comp = io::AioCompletion::create_and_start(ctx, m_image_ctx,
index 242f12165fc77639057a82dbd72f08939e52b94b..584a359017c738efc3b79b8fe72128da6c22920b 100644 (file)
@@ -32,7 +32,7 @@ DetachChildRequest<I>::~DetachChildRequest() {
 template <typename I>
 void DetachChildRequest<I>::send() {
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
 
     // use oldest snapshot or HEAD for parent spec
     if (!m_image_ctx.snap_info.empty()) {
index ad3a20caf0ba1243141c0282ff2db2acb545c7aa..594c14445ee9d084aba7575746f9ac50424ba6e4 100644 (file)
@@ -118,7 +118,7 @@ void ListWatchersRequest<I>::finish(int r) {
     m_watchers->clear();
 
     if (m_object_watchers.size() > 0) {
-      RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+      std::shared_lock owner_locker{m_image_ctx.owner_lock};
       uint64_t watch_handle = m_image_ctx.image_watcher != nullptr ?
         m_image_ctx.image_watcher->get_watch_handle() : 0;
 
index fb1ada777def058598397bdb2777aaf2c94d9d12..69269f59f4616a8ba0b301527d5f3ba5affa98aa 100644 (file)
@@ -638,7 +638,7 @@ Context *OpenRequest<I>::send_set_snap(int *result) {
   uint64_t snap_id = CEPH_NOSNAP;
   std::swap(m_image_ctx->open_snap_id, snap_id);
   if (snap_id == CEPH_NOSNAP) {
-    RWLock::RLocker image_locker(m_image_ctx->image_lock);
+    std::shared_lock image_locker{m_image_ctx->image_lock};
     snap_id = m_image_ctx->get_snap_id(m_image_ctx->snap_namespace,
                                        m_image_ctx->snap_name);
   }
index 5c664c217279c2328cd901d4d853f30ef81931a3..ef66a6ad774898dd22b975c30de7fc0efde17bc2 100644 (file)
@@ -52,7 +52,7 @@ void PreRemoveRequest<I>::send() {
 
 template <typename I>
 void PreRemoveRequest<I>::acquire_exclusive_lock() {
-  RWLock::RLocker owner_lock(m_image_ctx->owner_lock);
+  std::shared_lock owner_lock{m_image_ctx->owner_lock};
   if (m_image_ctx->exclusive_lock == nullptr) {
     validate_image_removal();
     return;
@@ -64,7 +64,7 @@ void PreRemoveRequest<I>::acquire_exclusive_lock() {
   // do not attempt to open the journal when removing the image in case
   // it's corrupt
   if (m_image_ctx->test_features(RBD_FEATURE_JOURNALING)) {
-    RWLock::WLocker image_locker(m_image_ctx->image_lock);
+    std::unique_lock image_locker{m_image_ctx->image_lock};
     m_image_ctx->set_journal_policy(new journal::DisabledPolicy());
   }
 
@@ -136,19 +136,19 @@ void PreRemoveRequest<I>::check_image_snaps() {
   auto cct = m_image_ctx->cct;
   ldout(cct, 5) << dendl;
 
-  m_image_ctx->image_lock.get_read();
+  m_image_ctx->image_lock.lock_shared();
   for (auto& snap_info : m_image_ctx->snap_info) {
     if (auto_delete_snapshot(snap_info.second)) {
       m_snap_infos.insert(snap_info);
     } else {
-      m_image_ctx->image_lock.put_read();
+      m_image_ctx->image_lock.unlock_shared();
 
       ldout(cct, 5) << "image has snapshots - not removing" << dendl;
       finish(-ENOTEMPTY);
       return;
     }
   }
-  m_image_ctx->image_lock.put_read();
+  m_image_ctx->image_lock.unlock_shared();
 
   list_image_watchers();
 }
@@ -257,7 +257,7 @@ void PreRemoveRequest<I>::remove_snapshot() {
   ldout(cct, 20) << "snap_id=" << snap_id << ", "
                  << "snap_name=" << snap_info.name << dendl;
 
-  RWLock::RLocker owner_lock(m_image_ctx->owner_lock);
+  std::shared_lock owner_lock{m_image_ctx->owner_lock};
   auto ctx = create_context_callback<
     PreRemoveRequest<I>, &PreRemoveRequest<I>::handle_remove_snapshot>(this);
   auto req = librbd::operation::SnapshotRemoveRequest<I>::create(
index 02b99b902025405c9902acd2510be3a0b9c8380b..88084d13b7739637c0179280341559ea16d9a625 100644 (file)
@@ -36,7 +36,7 @@ template <typename I>
 bool RefreshParentRequest<I>::is_refresh_required(
     I &child_image_ctx, const ParentImageInfo &parent_md,
     const MigrationInfo &migration_info) {
-  ceph_assert(child_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(child_image_ctx.image_lock));
   return (is_open_required(child_image_ctx, parent_md, migration_info) ||
           is_close_required(child_image_ctx, parent_md, migration_info));
 }
@@ -89,7 +89,7 @@ void RefreshParentRequest<I>::send() {
 
 template <typename I>
 void RefreshParentRequest<I>::apply() {
-  ceph_assert(m_child_image_ctx.image_lock.is_wlocked());
+  ceph_assert(ceph_mutex_is_wlocked(m_child_image_ctx.image_lock));
   std::swap(m_child_image_ctx.parent, m_parent_image_ctx);
 }
 
index ba9be9beb8f82a0ac2c6d35858b1b74266b0f42b..d521e33bff6177315beecfcb4bd37b330c3a8efa 100644 (file)
@@ -336,7 +336,7 @@ void RefreshRequest<I>::send_v2_get_mutable_metadata() {
 
   uint64_t snap_id;
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     snap_id = m_image_ctx.snap_id;
   }
 
@@ -807,7 +807,7 @@ Context *RefreshRequest<I>::handle_v2_get_snapshots(int *result) {
 template <typename I>
 void RefreshRequest<I>::send_v2_refresh_parent() {
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
 
     ParentImageInfo parent_md;
     MigrationInfo migration_info;
@@ -870,7 +870,7 @@ void RefreshRequest<I>::send_v2_init_exclusive_lock() {
   Context *ctx = create_context_callback<
     klass, &klass::handle_v2_init_exclusive_lock>(this);
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   m_exclusive_lock->init(m_features, ctx);
 }
 
@@ -902,7 +902,7 @@ void RefreshRequest<I>::send_v2_open_journal() {
      !m_image_ctx.exclusive_lock->is_lock_owner());
   bool journal_disabled_by_policy;
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     journal_disabled_by_policy = (
       !journal_disabled &&
       m_image_ctx.get_journal_policy()->journal_disabled());
@@ -954,7 +954,7 @@ template <typename I>
 void RefreshRequest<I>::send_v2_block_writes() {
   bool disabled_journaling = false;
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     disabled_journaling = ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0 &&
                            (m_features & RBD_FEATURE_JOURNALING) == 0 &&
                            m_image_ctx.journal != nullptr);
@@ -974,7 +974,7 @@ void RefreshRequest<I>::send_v2_block_writes() {
   Context *ctx = create_context_callback<
     RefreshRequest<I>, &RefreshRequest<I>::handle_v2_block_writes>(this);
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   m_image_ctx.io_work_queue->block_writes(ctx);
 }
 
@@ -1135,7 +1135,7 @@ Context *RefreshRequest<I>::handle_v2_shut_down_exclusive_lock(int *result) {
   }
 
   {
-    RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
+    std::unique_lock owner_locker{m_image_ctx.owner_lock};
     ceph_assert(m_image_ctx.exclusive_lock == nullptr);
   }
 
@@ -1230,7 +1230,7 @@ Context *RefreshRequest<I>::send_flush_aio() {
     CephContext *cct = m_image_ctx.cct;
     ldout(cct, 10) << this << " " << __func__ << dendl;
 
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+    std::shared_lock owner_locker{m_image_ctx.owner_lock};
     auto ctx = create_context_callback<
       RefreshRequest<I>, &RefreshRequest<I>::handle_flush_aio>(this);
     auto aio_comp = io::AioCompletion::create_and_start(
@@ -1280,8 +1280,7 @@ void RefreshRequest<I>::apply() {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << dendl;
 
-  RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
-  RWLock::WLocker image_locker(m_image_ctx.image_lock);
+  std::scoped_lock locker{m_image_ctx.owner_lock, m_image_ctx.image_lock};
 
   m_image_ctx.size = m_size;
   m_image_ctx.lockers = m_lockers;
index 865df16d74e64a2f58f434a0436d624bf86c2225..f1fe0d96cd7b941bc1d06eea99f1fb40db4d1042 100644 (file)
@@ -141,7 +141,7 @@ void RemoveRequest<I>::trim_image() {
     *m_image_ctx, create_context_callback<
       klass, &klass::handle_trim_image>(this));
 
-  RWLock::RLocker owner_lock(m_image_ctx->owner_lock);
+  std::shared_lock owner_lock{m_image_ctx->owner_lock};
   auto req = librbd::operation::TrimRequest<I>::create(
     *m_image_ctx, ctx, m_image_ctx->size, 0, m_prog_ctx);
   req->send();
index 22b009008a1b6b3e674b86cc834ad48a0360e25e..fa00ed981dc9fc52709a62a4054b1a09972dd6a4 100644 (file)
@@ -36,7 +36,7 @@ void SetFlagsRequest<I>::send_set_flags() {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 20) << __func__ << dendl;
 
-  RWLock::WLocker image_locker(m_image_ctx->image_lock);
+  std::unique_lock image_locker{m_image_ctx->image_lock};
   std::vector<uint64_t> snap_ids;
   snap_ids.push_back(CEPH_NOSNAP);
   for (auto it : m_image_ctx->snap_info) {
index 36905e625c9b7dd3ebc77711ac5212f3672cc44e..be67e176a69107a92b0fbb076aee7d04cc18492a 100644 (file)
@@ -5,7 +5,6 @@
 #define CEPH_LIBRBD_IMAGE_SET_FLAGS_REQUEST_H
 
 #include "include/buffer.h"
-#include "common/Mutex.h"
 #include <map>
 #include <string>
 
index c8e029ff7aa05c5ff4c711e03371fab59f9b68b2..43e7d7d67622b70a52aca6e05599711916eb3c6b 100644 (file)
@@ -48,7 +48,7 @@ void SetSnapRequest<I>::send() {
 template <typename I>
 void SetSnapRequest<I>::send_init_exclusive_lock() {
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     if (m_image_ctx.exclusive_lock != nullptr) {
       ceph_assert(m_image_ctx.snap_id == CEPH_NOSNAP);
       send_complete();
@@ -74,7 +74,7 @@ void SetSnapRequest<I>::send_init_exclusive_lock() {
   Context *ctx = create_context_callback<
     klass, &klass::handle_init_exclusive_lock>(this);
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   m_exclusive_lock->init(m_image_ctx.features, ctx);
 }
 
@@ -103,7 +103,7 @@ void SetSnapRequest<I>::send_block_writes() {
   Context *ctx = create_context_callback<
     klass, &klass::handle_block_writes>(this);
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   m_image_ctx.io_work_queue->block_writes(ctx);
 }
 
@@ -120,7 +120,7 @@ Context *SetSnapRequest<I>::handle_block_writes(int *result) {
   }
 
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     auto it = m_image_ctx.snap_info.find(m_snap_id);
     if (it == m_image_ctx.snap_info.end()) {
       ldout(cct, 5) << "failed to locate snapshot '" << m_snap_id << "'"
@@ -138,7 +138,7 @@ Context *SetSnapRequest<I>::handle_block_writes(int *result) {
 template <typename I>
 Context *SetSnapRequest<I>::send_shut_down_exclusive_lock(int *result) {
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     m_exclusive_lock = m_image_ctx.exclusive_lock;
   }
 
@@ -178,7 +178,7 @@ Context *SetSnapRequest<I>::send_refresh_parent(int *result) {
   ParentImageInfo parent_md;
   bool refresh_parent;
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
 
     const auto parent_info = m_image_ctx.get_parent_info(m_snap_id);
     if (parent_info == nullptr) {
@@ -323,8 +323,7 @@ int SetSnapRequest<I>::apply() {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << __func__ << dendl;
 
-  RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
-  RWLock::WLocker image_locker(m_image_ctx.image_lock);
+  std::scoped_lock locker{m_image_ctx.owner_lock, m_image_ctx.image_lock};
   if (m_snap_id != CEPH_NOSNAP) {
     ceph_assert(m_image_ctx.exclusive_lock == nullptr);
     int r = m_image_ctx.snap_set(m_snap_id);
index ead5f214c2233dfca0944a1dd3fac4d03eb7a71c..1d34106f9d937538c148738751ee006aec0edc03 100644 (file)
@@ -36,7 +36,7 @@ void NotifyLockOwner::send_notify() {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << dendl;
 
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   m_notifier.notify(m_bl, &m_notify_response, create_context_callback<
     NotifyLockOwner, &NotifyLockOwner::handle_notify>(this));
 }
index 1cefd073b528a19b26977fb75132b27cfedf9f80..9660a0ef4f0dea41e3d6cb0611f399c45e6c10c3 100644 (file)
@@ -171,9 +171,10 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
   void image_info(ImageCtx *ictx, image_info_t& info, size_t infosize)
   {
     int obj_order = ictx->order;
-    ictx->image_lock.get_read();
-    info.size = ictx->get_image_size(ictx->snap_id);
-    ictx->image_lock.put_read();
+    {
+      std::shared_lock locker{ictx->image_lock};
+      info.size = ictx->get_image_size(ictx->snap_id);
+    }
     info.obj_size = 1ULL << obj_order;
     info.num_objs = Striper::get_num_objects(ictx->layout, info.size);
     info.order = obj_order;
@@ -198,15 +199,15 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
 
   void trim_image(ImageCtx *ictx, uint64_t newsize, ProgressContext& prog_ctx)
   {
-    ceph_assert(ictx->owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(ictx->owner_lock));
     ceph_assert(ictx->exclusive_lock == nullptr ||
                 ictx->exclusive_lock->is_lock_owner());
 
     C_SaferCond ctx;
-    ictx->image_lock.get_read();
+    ictx->image_lock.lock_shared();
     operation::TrimRequest<> *req = operation::TrimRequest<>::create(
       *ictx, &ctx, ictx->size, newsize, prog_ctx);
-    ictx->image_lock.put_read();
+    ictx->image_lock.unlock_shared();
     req->send();
 
     int r = ctx.wait();
@@ -520,7 +521,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
       return r;
     }
 
-    RWLock::RLocker l(ictx->image_lock);
+    std::shared_lock l{ictx->image_lock};
     snap_t snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace(),
                                        snap_name);
 
@@ -605,7 +606,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
     int r = ictx->state->refresh_if_required();
     if (r < 0)
       return r;
-    RWLock::RLocker l(ictx->image_lock);
+    std::shared_lock l{ictx->image_lock};
     snap_t snap_id = ictx->get_snap_id(*snap_namespace, snap_name);
     if (snap_id == CEPH_NOSNAP)
       return -ENOENT;
@@ -622,7 +623,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
     if (r < 0)
       return r;
 
-    RWLock::RLocker l(ictx->image_lock);
+    std::shared_lock l{ictx->image_lock};
     snap_t snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace(), snap_name);
     if (snap_id == CEPH_NOSNAP)
       return -ENOENT;
@@ -953,7 +954,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
     int r = ictx->state->refresh_if_required();
     if (r < 0)
       return r;
-    RWLock::RLocker l2(ictx->image_lock);
+    std::shared_lock l2{ictx->image_lock};
     *size = ictx->get_image_size(ictx->snap_id);
     return 0;
   }
@@ -963,7 +964,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
     int r = ictx->state->refresh_if_required();
     if (r < 0)
       return r;
-    RWLock::RLocker l(ictx->image_lock);
+    std::shared_lock l{ictx->image_lock};
     *features = ictx->features;
     return 0;
   }
@@ -973,7 +974,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
     int r = ictx->state->refresh_if_required();
     if (r < 0)
       return r;
-    RWLock::RLocker image_locker(ictx->image_lock);
+    std::shared_lock image_locker{ictx->image_lock};
     return ictx->get_parent_overlap(ictx->snap_id, overlap);
   }
 
@@ -984,7 +985,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
       return r;
     }
 
-    RWLock::RLocker l2(ictx->image_lock);
+    std::shared_lock l2{ictx->image_lock};
     return ictx->get_flags(ictx->snap_id, flags);
   }
 
@@ -1009,7 +1010,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
     ldout(cct, 20) << __func__ << ": ictx=" << ictx << dendl;
     *is_owner = false;
 
-    RWLock::RLocker owner_locker(ictx->owner_lock);
+    std::shared_lock owner_locker{ictx->owner_lock};
     if (ictx->exclusive_lock == nullptr) {
       return 0;
     }
@@ -1039,7 +1040,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
 
     C_SaferCond lock_ctx;
     {
-      RWLock::WLocker l(ictx->owner_lock);
+      std::unique_lock l{ictx->owner_lock};
 
       if (ictx->exclusive_lock == nullptr) {
        lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
@@ -1065,7 +1066,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
       return r;
     }
 
-    RWLock::RLocker l(ictx->owner_lock);
+    std::shared_lock l{ictx->owner_lock};
     if (ictx->exclusive_lock == nullptr) {
       return -EINVAL;
     } else if (!ictx->exclusive_lock->is_lock_owner()) {
@@ -1083,7 +1084,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
 
     C_SaferCond lock_ctx;
     {
-      RWLock::WLocker l(ictx->owner_lock);
+      std::unique_lock l{ictx->owner_lock};
 
       if (ictx->exclusive_lock == nullptr ||
          !ictx->exclusive_lock->is_lock_owner()) {
@@ -1150,7 +1151,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
     managed_lock::Locker locker;
     C_SaferCond get_owner_ctx;
     {
-      RWLock::RLocker l(ictx->owner_lock);
+      std::shared_lock l{ictx->owner_lock};
 
       if (ictx->exclusive_lock == nullptr) {
         lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
@@ -1174,7 +1175,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
 
     C_SaferCond break_ctx;
     {
-      RWLock::RLocker l(ictx->owner_lock);
+      std::shared_lock l{ictx->owner_lock};
 
       if (ictx->exclusive_lock == nullptr) {
         lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
@@ -1201,7 +1202,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
     if (r < 0)
       return r;
 
-    RWLock::RLocker l(ictx->image_lock);
+    std::shared_lock l{ictx->image_lock};
     for (map<snap_t, SnapInfo>::iterator it = ictx->snap_info.begin();
         it != ictx->snap_info.end(); ++it) {
       snap_info_t info;
@@ -1223,7 +1224,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
     if (r < 0)
       return r;
 
-    RWLock::RLocker l(ictx->image_lock);
+    std::shared_lock l{ictx->image_lock};
     *exists = ictx->get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP;
     return 0;
   }
@@ -1325,10 +1326,10 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
                   << (src->snap_name.length() ? "@" + src->snap_name : "")
                   << " -> " << destname << " opts = " << opts << dendl;
 
-    src->image_lock.get_read();
+    src->image_lock.lock_shared();
     uint64_t features = src->features;
     uint64_t src_size = src->get_image_size(src->snap_id);
-    src->image_lock.put_read();
+    src->image_lock.unlock_shared();
     uint64_t format = src->old_format ? 1 : 2;
     if (opts.get(RBD_IMAGE_OPTION_FORMAT, &format) != 0) {
       opts.set(RBD_IMAGE_OPTION_FORMAT, format);
@@ -1470,13 +1471,13 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
 
   int copy(ImageCtx *src, ImageCtx *dest, ProgressContext &prog_ctx, size_t sparse_size)
   {
-    src->image_lock.get_read();
+    src->image_lock.lock_shared();
     uint64_t src_size = src->get_image_size(src->snap_id);
-    src->image_lock.put_read();
+    src->image_lock.unlock_shared();
 
-    dest->image_lock.get_read();
+    dest->image_lock.lock_shared();
     uint64_t dest_size = dest->get_image_size(dest->snap_id);
-    dest->image_lock.put_read();
+    dest->image_lock.unlock_shared();
 
     CephContext *cct = src->cct;
     if (dest_size < src_size) {
@@ -1514,7 +1515,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
       trace.init("copy", &src->trace_endpoint);
     }
 
-    RWLock::RLocker owner_lock(src->owner_lock);
+    std::shared_lock owner_lock{src->owner_lock};
     SimpleThrottle throttle(src->config.get_val<uint64_t>("rbd_concurrent_management_ops"), false);
     uint64_t period = src->get_stripe_period();
     unsigned fadvise_flags = LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL |
@@ -1526,7 +1527,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
       }
 
       {
-        RWLock::RLocker image_locker(src->image_lock);
+       std::shared_lock image_locker{src->image_lock};
         if (src->object_map != nullptr) {
           bool skip = true;
           // each period is related to src->stripe_count objects, check them all
@@ -1576,7 +1577,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
     if (r < 0)
       return r;
 
-    RWLock::RLocker locker(ictx->image_lock);
+    std::shared_lock locker{ictx->image_lock};
     if (exclusive)
       *exclusive = ictx->exclusive_locked;
     if (tag)
@@ -1614,7 +1615,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
      * duplicate that code.
      */
     {
-      RWLock::RLocker locker(ictx->image_lock);
+      std::shared_lock locker{ictx->image_lock};
       r = rados::cls::lock::lock(&ictx->md_ctx, ictx->header_oid, RBD_LOCK_NAME,
                                 exclusive ? LOCK_EXCLUSIVE : LOCK_SHARED,
                                 cookie, tag, "", utime_t(), 0);
@@ -1637,7 +1638,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
       return r;
 
     {
-      RWLock::RLocker locker(ictx->image_lock);
+      std::shared_lock locker{ictx->image_lock};
       r = rados::cls::lock::unlock(&ictx->md_ctx, ictx->header_oid,
                                   RBD_LOCK_NAME, cookie);
       if (r < 0) {
@@ -1735,9 +1736,9 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
       return r;
 
     uint64_t mylen = len;
-    ictx->image_lock.get_read();
+    ictx->image_lock.lock_shared();
     r = clip_io(ictx, off, &mylen);
-    ictx->image_lock.put_read();
+    ictx->image_lock.unlock_shared();
     if (r < 0)
       return r;
 
@@ -1750,7 +1751,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
       trace.init("read_iterate", &ictx->trace_endpoint);
     }
 
-    RWLock::RLocker owner_locker(ictx->owner_lock);
+    std::shared_lock owner_locker{ictx->owner_lock};
     start_time = coarse_mono_clock::now();
     while (left > 0) {
       uint64_t period_off = off - (off % period);
@@ -1789,7 +1790,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
   // validate extent against image size; clip to image size if necessary
   int clip_io(ImageCtx *ictx, uint64_t off, uint64_t *len)
   {
-    ceph_assert(ictx->image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(ictx->image_lock));
     uint64_t image_size = ictx->get_image_size(ictx->snap_id);
     bool snap_exists = ictx->snap_exists;
 
@@ -1823,7 +1824,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) {
 
     C_SaferCond ctx;
     {
-      RWLock::RLocker owner_locker(ictx->owner_lock);
+      std::shared_lock owner_locker{ictx->owner_lock};
       ictx->io_object_dispatcher->invalidate_cache(&ctx);
     }
     r = ctx.wait();
index c5a3bc932e0a6535b02de9f0780c45bd086854e3..2a00cd6699ac42914881a7c1b619576708b8fe4a 100644 (file)
@@ -25,7 +25,7 @@ struct C_CompleteFlushes : public Context {
     : image_ctx(image_ctx), flush_contexts(std::move(flush_contexts)) {
   }
   void finish(int r) override {
-    RWLock::RLocker owner_locker(image_ctx->owner_lock);
+    std::shared_lock owner_locker{image_ctx->owner_lock};
     while (!flush_contexts.empty()) {
       Context *flush_ctx = flush_contexts.front();
       flush_contexts.pop_front();
@@ -43,7 +43,7 @@ void AsyncOperation::start_op(ImageCtx &image_ctx) {
   m_image_ctx = &image_ctx;
 
   ldout(m_image_ctx->cct, 20) << this << " " << __func__ << dendl;
-  Mutex::Locker l(m_image_ctx->async_ops_lock);
+  std::lock_guard l{m_image_ctx->async_ops_lock};
   m_image_ctx->async_ops.push_front(&m_xlist_item);
 }
 
@@ -51,7 +51,7 @@ void AsyncOperation::finish_op() {
   ldout(m_image_ctx->cct, 20) << this << " " << __func__ << dendl;
 
   {
-    Mutex::Locker l(m_image_ctx->async_ops_lock);
+    std::lock_guard l{m_image_ctx->async_ops_lock};
     xlist<AsyncOperation *>::iterator iter(&m_xlist_item);
     ++iter;
     ceph_assert(m_xlist_item.remove_myself());
@@ -76,7 +76,7 @@ void AsyncOperation::finish_op() {
 
 void AsyncOperation::flush(Context* on_finish) {
   {
-    Mutex::Locker locker(m_image_ctx->async_ops_lock);
+    std::lock_guard locker{m_image_ctx->async_ops_lock};
     xlist<AsyncOperation *>::iterator iter(&m_xlist_item);
     ++iter;
 
index 0f410966ad472739cebe087f82b79d8636599929..e9f5d2487ff89fb4acccc98dbbcd645a7d60d43b 100644 (file)
@@ -3,9 +3,9 @@
 
 #include "librbd/io/CopyupRequest.h"
 #include "common/ceph_context.h"
+#include "common/ceph_mutex.h"
 #include "common/dout.h"
 #include "common/errno.h"
-#include "common/Mutex.h"
 #include "common/WorkQueue.h"
 #include "librbd/AsyncObjectThrottle.h"
 #include "librbd/ExclusiveLock.h"
@@ -52,13 +52,13 @@ public:
 
   int send() override {
     auto& image_ctx = this->m_image_ctx;
-    ceph_assert(image_ctx.owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
     if (image_ctx.exclusive_lock == nullptr) {
       return 1;
     }
     ceph_assert(image_ctx.exclusive_lock->is_lock_owner());
 
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
     if (image_ctx.object_map == nullptr) {
       return 1;
     }
@@ -73,7 +73,7 @@ public:
 
   int update_head() {
     auto& image_ctx = this->m_image_ctx;
-    ceph_assert(image_ctx.image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
 
     bool sent = image_ctx.object_map->template aio_update<Context>(
       CEPH_NOSNAP, m_object_no, m_head_object_map_state, {}, m_trace, false,
@@ -83,7 +83,7 @@ public:
 
   int update_snapshot(uint64_t snap_id) {
     auto& image_ctx = this->m_image_ctx;
-    ceph_assert(image_ctx.image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
 
     uint8_t state = OBJECT_EXISTS;
     if (image_ctx.test_features(RBD_FEATURE_FAST_DIFF, image_ctx.image_lock) &&
@@ -115,8 +115,7 @@ CopyupRequest<I>::CopyupRequest(I *ictx, uint64_t objectno,
                                 Extents &&image_extents,
                                 const ZTracer::Trace &parent_trace)
   : m_image_ctx(ictx), m_object_no(objectno), m_image_extents(image_extents),
-    m_trace(util::create_trace(*m_image_ctx, "copy-up", parent_trace)),
-    m_lock("CopyupRequest", false, false)
+    m_trace(util::create_trace(*m_image_ctx, "copy-up", parent_trace))
 {
   m_async_op.start_op(*util::get_image_ctx(m_image_ctx));
 }
@@ -129,7 +128,7 @@ CopyupRequest<I>::~CopyupRequest() {
 
 template <typename I>
 void CopyupRequest<I>::append_request(AbstractObjectWriteRequest<I> *req) {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
 
   auto cct = m_image_ctx->cct;
   ldout(cct, 20) << "object_request=" << req << ", "
@@ -149,7 +148,7 @@ void CopyupRequest<I>::send() {
 template <typename I>
 void CopyupRequest<I>::read_from_parent() {
   auto cct = m_image_ctx->cct;
-  RWLock::RLocker image_locker(m_image_ctx->image_lock);
+  std::shared_lock image_locker{m_image_ctx->image_lock};
 
   if (m_image_ctx->parent == nullptr) {
     ldout(cct, 5) << "parent detached" << dendl;
@@ -188,15 +187,15 @@ void CopyupRequest<I>::handle_read_from_parent(int r) {
   auto cct = m_image_ctx->cct;
   ldout(cct, 20) << "r=" << r << dendl;
 
-  m_image_ctx->image_lock.get_read();
-  m_lock.Lock();
+  m_image_ctx->image_lock.lock_shared();
+  m_lock.lock();
   m_copyup_is_zero = m_copyup_data.is_zero();
   m_copyup_required = is_copyup_required();
   disable_append_requests();
 
   if (r < 0 && r != -ENOENT) {
-    m_lock.Unlock();
-    m_image_ctx->image_lock.put_read();
+    m_lock.unlock();
+    m_image_ctx->image_lock.unlock_shared();
 
     lderr(cct) << "error reading from parent: " << cpp_strerror(r) << dendl;
     finish(r);
@@ -204,8 +203,8 @@ void CopyupRequest<I>::handle_read_from_parent(int r) {
   }
 
   if (!m_copyup_required) {
-    m_lock.Unlock();
-    m_image_ctx->image_lock.put_read();
+    m_lock.unlock();
+    m_image_ctx->image_lock.unlock_shared();
 
     ldout(cct, 20) << "no-op, skipping" << dendl;
     finish(0);
@@ -219,8 +218,8 @@ void CopyupRequest<I>::handle_read_from_parent(int r) {
                       m_image_ctx->snaps.rend());
   }
 
-  m_lock.Unlock();
-  m_image_ctx->image_lock.put_read();
+  m_lock.unlock();
+  m_image_ctx->image_lock.unlock_shared();
 
   update_object_maps();
 }
@@ -228,12 +227,12 @@ void CopyupRequest<I>::handle_read_from_parent(int r) {
 template <typename I>
 void CopyupRequest<I>::deep_copy() {
   auto cct = m_image_ctx->cct;
-  ceph_assert(m_image_ctx->image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
   ceph_assert(m_image_ctx->parent != nullptr);
 
-  m_lock.Lock();
+  m_lock.lock();
   m_flatten = is_copyup_required() ? true : m_image_ctx->migration_info.flatten;
-  m_lock.Unlock();
+  m_lock.unlock();
 
   ldout(cct, 20) << "flatten=" << m_flatten << dendl;
 
@@ -251,12 +250,12 @@ void CopyupRequest<I>::handle_deep_copy(int r) {
   auto cct = m_image_ctx->cct;
   ldout(cct, 20) << "r=" << r << dendl;
 
-  m_image_ctx->image_lock.get_read();
-  m_lock.Lock();
+  m_image_ctx->image_lock.lock_shared();
+  m_lock.lock();
   m_copyup_required = is_copyup_required();
   if (r == -ENOENT && !m_flatten && m_copyup_required) {
-    m_lock.Unlock();
-    m_image_ctx->image_lock.put_read();
+    m_lock.unlock();
+    m_image_ctx->image_lock.unlock_shared();
 
     ldout(cct, 10) << "restart deep-copy with flatten" << dendl;
     send();
@@ -266,8 +265,8 @@ void CopyupRequest<I>::handle_deep_copy(int r) {
   disable_append_requests();
 
   if (r < 0 && r != -ENOENT) {
-    m_lock.Unlock();
-    m_image_ctx->image_lock.put_read();
+    m_lock.unlock();
+    m_image_ctx->image_lock.unlock_shared();
 
     lderr(cct) << "error encountered during deep-copy: " << cpp_strerror(r)
                << dendl;
@@ -276,8 +275,8 @@ void CopyupRequest<I>::handle_deep_copy(int r) {
   }
 
   if (!m_copyup_required && !is_update_object_map_required(r)) {
-    m_lock.Unlock();
-    m_image_ctx->image_lock.put_read();
+    m_lock.unlock();
+    m_image_ctx->image_lock.unlock_shared();
 
     if (r == -ENOENT) {
       r = 0;
@@ -296,16 +295,16 @@ void CopyupRequest<I>::handle_deep_copy(int r) {
     compute_deep_copy_snap_ids();
   }
 
-  m_lock.Unlock();
-  m_image_ctx->image_lock.put_read();
+  m_lock.unlock();
+  m_image_ctx->image_lock.unlock_shared();
 
   update_object_maps();
 }
 
 template <typename I>
 void CopyupRequest<I>::update_object_maps() {
-  RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
-  RWLock::RLocker image_locker(m_image_ctx->image_lock);
+  std::shared_lock owner_locker{m_image_ctx->owner_lock};
+  std::shared_lock image_locker{m_image_ctx->image_lock};
   if (m_image_ctx->object_map == nullptr) {
     image_locker.unlock();
     owner_locker.unlock();
@@ -370,13 +369,13 @@ void CopyupRequest<I>::handle_update_object_maps(int r) {
 template <typename I>
 void CopyupRequest<I>::copyup() {
   auto cct = m_image_ctx->cct;
-  m_image_ctx->image_lock.get_read();
+  m_image_ctx->image_lock.lock_shared();
   auto snapc = m_image_ctx->snapc;
-  m_image_ctx->image_lock.put_read();
+  m_image_ctx->image_lock.unlock_shared();
 
-  m_lock.Lock();
+  m_lock.lock();
   if (!m_copyup_required) {
-    m_lock.Unlock();
+    m_lock.unlock();
 
     ldout(cct, 20) << "skipping copyup" << dendl;
     finish(0);
@@ -426,7 +425,7 @@ void CopyupRequest<I>::copyup() {
       ++m_pending_copyups;
     }
   }
-  m_lock.Unlock();
+  m_lock.unlock();
 
   // issue librados ops at the end to simplify test cases
   std::string oid(data_object_name(m_image_ctx, m_object_no));
@@ -471,7 +470,7 @@ void CopyupRequest<I>::handle_copyup(int r) {
   auto cct = m_image_ctx->cct;
   unsigned pending_copyups;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ceph_assert(m_pending_copyups > 0);
     pending_copyups = --m_pending_copyups;
   }
@@ -526,13 +525,13 @@ void CopyupRequest<I>::complete_requests(bool override_restart_retval, int r) {
 
 template <typename I>
 void CopyupRequest<I>::disable_append_requests() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   m_append_request_permitted = false;
 }
 
 template <typename I>
 void CopyupRequest<I>::remove_from_list() {
-  Mutex::Locker copyup_list_locker(m_image_ctx->copyup_list_lock);
+  std::lock_guard copyup_list_locker{m_image_ctx->copyup_list_lock};
 
   auto it = m_image_ctx->copyup_list.find(m_object_no);
   if (it != m_image_ctx->copyup_list.end()) {
@@ -542,7 +541,7 @@ void CopyupRequest<I>::remove_from_list() {
 
 template <typename I>
 bool CopyupRequest<I>::is_copyup_required() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   bool copy_on_read = m_pending_requests.empty();
   if (copy_on_read) {
@@ -564,13 +563,13 @@ bool CopyupRequest<I>::is_copyup_required() {
 
 template <typename I>
 bool CopyupRequest<I>::is_deep_copy() const {
-  ceph_assert(m_image_ctx->image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
   return !m_image_ctx->migration_info.empty();
 }
 
 template <typename I>
 bool CopyupRequest<I>::is_update_object_map_required(int r) {
-  ceph_assert(m_image_ctx->image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
 
   if (r < 0) {
     return false;
@@ -593,7 +592,7 @@ bool CopyupRequest<I>::is_update_object_map_required(int r) {
 
 template <typename I>
 void CopyupRequest<I>::compute_deep_copy_snap_ids() {
-  ceph_assert(m_image_ctx->image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
 
   // don't copy ids for the snaps updated by object deep copy or
   // that don't overlap
index 25f19e14c31473edeba70ffca40d9830f0009a1d..01d679f6213b7d8361b8a903cae2dcfee61a3015 100644 (file)
@@ -7,7 +7,7 @@
 #include "include/int_types.h"
 #include "include/rados/librados.hpp"
 #include "include/buffer.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "common/zipkin_trace.h"
 #include "librbd/io/AsyncOperation.h"
 #include "librbd/io/Types.h"
@@ -96,7 +96,7 @@ private:
   std::vector<uint64_t> m_snap_ids;
   bool m_first_snap_is_clean = false;
 
-  Mutex m_lock;
+  ceph::mutex m_lock = ceph::make_mutex("CopyupRequest", false);
   WriteRequests m_pending_requests;
   unsigned m_pending_copyups = 0;
 
index 166c8702308b7872add7c2721c244d85f18bf95f..d835368153f842c596c0cefc9ec3339aece10ccf 100644 (file)
@@ -64,19 +64,19 @@ void readahead(I *ictx, const Extents& image_extents) {
     total_bytes += image_extent.second;
   }
 
-  ictx->image_lock.get_read();
+  ictx->image_lock.lock_shared();
   auto total_bytes_read = ictx->total_bytes_read.fetch_add(total_bytes);
   bool abort = (
     ictx->readahead_disable_after_bytes != 0 &&
     total_bytes_read > ictx->readahead_disable_after_bytes);
   if (abort) {
-    ictx->image_lock.put_read();
+    ictx->image_lock.unlock_shared();
     return;
   }
 
   uint64_t image_size = ictx->get_image_size(ictx->snap_id);
   auto snap_id = ictx->snap_id;
-  ictx->image_lock.put_read();
+  ictx->image_lock.unlock_shared();
 
   auto readahead_extent = ictx->readahead.update(image_extents, image_size);
   uint64_t readahead_offset = readahead_extent.first;
@@ -247,7 +247,7 @@ void ImageRequest<I>::send() {
 
 template <typename I>
 int ImageRequest<I>::clip_request() {
-  RWLock::RLocker image_locker(m_image_ctx.image_lock);
+  std::shared_lock image_locker{m_image_ctx.image_lock};
   for (auto &image_extent : m_image_extents) {
     auto clip_len = image_extent.second;
     int r = clip_io(get_image_ctx(&m_image_ctx), image_extent.first, &clip_len);
@@ -286,7 +286,7 @@ void ImageRequest<I>::update_timestamp() {
 
   utime_t ts = ceph_clock_now();
   {
-    RWLock::RLocker timestamp_locker(m_image_ctx.timestamp_lock);
+    std::shared_lock timestamp_locker{m_image_ctx.timestamp_lock};
     if(!should_update_timestamp(ts, std::invoke(get_timestamp_fn, m_image_ctx),
                                 update_interval)) {
       return;
@@ -294,7 +294,7 @@ void ImageRequest<I>::update_timestamp() {
   }
 
   {
-    RWLock::WLocker timestamp_locker(m_image_ctx.timestamp_lock);
+    std::unique_lock timestamp_locker{m_image_ctx.timestamp_lock};
     bool update = should_update_timestamp(
       ts, std::invoke(get_timestamp_fn, m_image_ctx), update_interval);
     if (!update) {
@@ -355,7 +355,7 @@ void ImageReadRequest<I>::send_request() {
   {
     // prevent image size from changing between computing clip and recording
     // pending async operation
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
     snap_id = image_ctx.snap_id;
   }
 
@@ -419,7 +419,7 @@ void AbstractImageWriteRequest<I>::send_request() {
   {
     // prevent image size from changing between computing clip and recording
     // pending async operation
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
     if (image_ctx.snap_id != CEPH_NOSNAP || image_ctx.read_only) {
       aio_comp->fail(-EROFS);
       return;
@@ -666,7 +666,7 @@ void ImageFlushRequest<I>::send_request() {
 
   bool journaling = false;
   {
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
     journaling = (m_flush_source == FLUSH_SOURCE_USER &&
                   image_ctx.journal != nullptr &&
                   image_ctx.journal->is_journal_appending());
index fb15ca339ea64b69eb4263e4eece805be870c959..5bb0f1664f9de46f30e8d91edcece498243fc907 100644 (file)
@@ -93,12 +93,13 @@ ImageRequestWQ<I>::ImageRequestWQ(I *image_ctx, const string &name,
                                  time_t ti, ThreadPool *tp)
   : ThreadPool::PointerWQ<ImageDispatchSpec<I> >(name, ti, 0, tp),
     m_image_ctx(*image_ctx),
-    m_lock(util::unique_lock_name("ImageRequestWQ<I>::m_lock", this)) {
+    m_lock(ceph::make_shared_mutex(
+      util::unique_lock_name("ImageRequestWQ<I>::m_lock", this))) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << "ictx=" << image_ctx << dendl;
 
   SafeTimer *timer;
-  Mutex *timer_lock;
+  ceph::mutex *timer_lock;
   ImageCtx::get_timer_instance(cct, &timer, &timer_lock);
 
   for (auto flag : throttle_flags) {
@@ -137,9 +138,9 @@ ssize_t ImageRequestWQ<I>::write(uint64_t off, uint64_t len,
   ldout(cct, 20) << "ictx=" << &m_image_ctx << ", off=" << off << ", "
                  << "len = " << len << dendl;
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   int r = clip_io(util::get_image_ctx(&m_image_ctx), off, &len);
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
   if (r < 0) {
     lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
     return r;
@@ -163,9 +164,9 @@ ssize_t ImageRequestWQ<I>::discard(uint64_t off, uint64_t len,
   ldout(cct, 20) << "ictx=" << &m_image_ctx << ", off=" << off << ", "
                  << "len = " << len << dendl;
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   int r = clip_io(util::get_image_ctx(&m_image_ctx), off, &len);
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
   if (r < 0) {
     lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
     return r;
@@ -189,9 +190,9 @@ ssize_t ImageRequestWQ<I>::writesame(uint64_t off, uint64_t len,
   ldout(cct, 20) << "ictx=" << &m_image_ctx << ", off=" << off << ", "
                  << "len = " << len << ", data_len " << bl.length() << dendl;
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   int r = clip_io(util::get_image_ctx(&m_image_ctx), off, &len);
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
   if (r < 0) {
     lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
     return r;
@@ -218,9 +219,9 @@ ssize_t ImageRequestWQ<I>::compare_and_write(uint64_t off, uint64_t len,
   ldout(cct, 20) << "compare_and_write ictx=" << &m_image_ctx << ", off="
                  << off << ", " << "len = " << len << dendl;
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   int r = clip_io(util::get_image_ctx(&m_image_ctx), off, &len);
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
   if (r < 0) {
     lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
     return r;
@@ -283,7 +284,7 @@ void ImageRequestWQ<I>::aio_read(AioCompletion *c, uint64_t off, uint64_t len,
 
   // if journaling is enabled -- we need to replay the journal because
   // it might contain an uncommitted write
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   if (m_image_ctx.non_blocking_aio || writes_blocked() || !writes_empty() ||
       require_lock_on_read()) {
     queue(ImageDispatchSpec<I>::create_read_request(
@@ -323,7 +324,7 @@ void ImageRequestWQ<I>::aio_write(AioCompletion *c, uint64_t off, uint64_t len,
     return;
   }
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   if (m_image_ctx.non_blocking_aio || writes_blocked()) {
     queue(ImageDispatchSpec<I>::create_write_request(
             m_image_ctx, c, {{off, len}}, std::move(bl), op_flags, trace));
@@ -362,7 +363,7 @@ void ImageRequestWQ<I>::aio_discard(AioCompletion *c, uint64_t off,
     return;
   }
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   if (m_image_ctx.non_blocking_aio || writes_blocked()) {
     queue(ImageDispatchSpec<I>::create_discard_request(
             m_image_ctx, c, off, len, discard_granularity_bytes, trace));
@@ -397,7 +398,7 @@ void ImageRequestWQ<I>::aio_flush(AioCompletion *c, bool native_async) {
     return;
   }
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   if (m_image_ctx.non_blocking_aio || writes_blocked() || !writes_empty()) {
     queue(ImageDispatchSpec<I>::create_flush_request(
             m_image_ctx, c, FLUSH_SOURCE_USER, trace));
@@ -435,7 +436,7 @@ void ImageRequestWQ<I>::aio_writesame(AioCompletion *c, uint64_t off,
     return;
   }
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   if (m_image_ctx.non_blocking_aio || writes_blocked()) {
     queue(ImageDispatchSpec<I>::create_write_same_request(
             m_image_ctx, c, off, len, std::move(bl), op_flags, trace));
@@ -476,7 +477,7 @@ void ImageRequestWQ<I>::aio_compare_and_write(AioCompletion *c,
     return;
   }
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   if (m_image_ctx.non_blocking_aio || writes_blocked()) {
     queue(ImageDispatchSpec<I>::create_compare_and_write_request(
             m_image_ctx, c, {{off, len}}, std::move(cmp_bl), std::move(bl),
@@ -493,10 +494,10 @@ void ImageRequestWQ<I>::aio_compare_and_write(AioCompletion *c,
 
 template <typename I>
 void ImageRequestWQ<I>::shut_down(Context *on_shutdown) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
 
   {
-    RWLock::WLocker locker(m_lock);
+    std::unique_lock locker{m_lock};
     ceph_assert(!m_shutdown);
     m_shutdown = true;
 
@@ -522,11 +523,11 @@ int ImageRequestWQ<I>::block_writes() {
 
 template <typename I>
 void ImageRequestWQ<I>::block_writes(Context *on_blocked) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   CephContext *cct = m_image_ctx.cct;
 
   {
-    RWLock::WLocker locker(m_lock);
+    std::unique_lock locker{m_lock};
     ++m_write_blockers;
     ldout(cct, 5) << &m_image_ctx << ", " << "num="
                   << m_write_blockers << dendl;
@@ -547,7 +548,7 @@ void ImageRequestWQ<I>::unblock_writes() {
   bool wake_up = false;
   Contexts waiter_contexts;
   {
-    RWLock::WLocker locker(m_lock);
+    std::unique_lock locker{m_lock};
     ceph_assert(m_write_blockers > 0);
     --m_write_blockers;
 
@@ -569,11 +570,11 @@ void ImageRequestWQ<I>::unblock_writes() {
 
 template <typename I>
 void ImageRequestWQ<I>::wait_on_writes_unblocked(Context *on_unblocked) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   CephContext *cct = m_image_ctx.cct;
 
   {
-    RWLock::WLocker locker(m_lock);
+    std::unique_lock locker{m_lock};
     ldout(cct, 20) << &m_image_ctx << ", " << "write_blockers="
                    << m_write_blockers << dendl;
     if (!m_unblocked_write_waiter_contexts.empty() || m_write_blockers > 0) {
@@ -592,7 +593,7 @@ void ImageRequestWQ<I>::set_require_lock(Direction direction, bool enabled) {
 
   bool wake_up = false;
   {
-    RWLock::WLocker locker(m_lock);
+    std::unique_lock locker{m_lock};
     switch (direction) {
     case DIRECTION_READ:
       wake_up = (enabled != m_require_lock_on_read);
@@ -720,7 +721,7 @@ void *ImageRequestWQ<I>::_void_dequeue() {
   bool lock_required;
   bool refresh_required = m_image_ctx.state->is_refresh_required();
   {
-    RWLock::RLocker locker(m_lock);
+    std::shared_lock locker{m_lock};
     bool write_op = peek_item->is_write_op();
     lock_required = is_lock_required(write_op);
     if (write_op) {
@@ -742,7 +743,7 @@ void *ImageRequestWQ<I>::_void_dequeue() {
 
   if (lock_required) {
     this->get_pool_lock().unlock();
-    m_image_ctx.owner_lock.get_read();
+    m_image_ctx.owner_lock.lock_shared();
     if (m_image_ctx.exclusive_lock != nullptr) {
       ldout(cct, 5) << "exclusive lock required: delaying IO " << item << dendl;
       if (!m_image_ctx.get_exclusive_lock_policy()->may_auto_request_lock()) {
@@ -761,7 +762,7 @@ void *ImageRequestWQ<I>::_void_dequeue() {
       // raced with the exclusive lock being disabled
       lock_required = false;
     }
-    m_image_ctx.owner_lock.put_read();
+    m_image_ctx.owner_lock.unlock_shared();
     this->get_pool_lock().lock();
 
     if (lock_required) {
@@ -804,7 +805,7 @@ void ImageRequestWQ<I>::process(ImageDispatchSpec<I> *req) {
 
 template <typename I>
 void ImageRequestWQ<I>::finish_queued_io(ImageDispatchSpec<I> *req) {
-  RWLock::RLocker locker(m_lock);
+  std::shared_lock locker{m_lock};
   if (req->is_write_op()) {
     ceph_assert(m_queued_writes > 0);
     m_queued_writes--;
@@ -818,7 +819,7 @@ template <typename I>
 void ImageRequestWQ<I>::finish_in_flight_write() {
   bool writes_blocked = false;
   {
-    RWLock::RLocker locker(m_lock);
+    std::shared_lock locker{m_lock};
     ceph_assert(m_in_flight_writes > 0);
     if (--m_in_flight_writes == 0 &&
         !m_write_blocker_contexts.empty()) {
@@ -833,7 +834,7 @@ void ImageRequestWQ<I>::finish_in_flight_write() {
 
 template <typename I>
 int ImageRequestWQ<I>::start_in_flight_io(AioCompletion *c) {
-  RWLock::RLocker locker(m_lock);
+  std::shared_lock locker{m_lock};
 
   if (m_shutdown) {
     CephContext *cct = m_image_ctx.cct;
@@ -851,7 +852,7 @@ template <typename I>
 void ImageRequestWQ<I>::finish_in_flight_io() {
   Context *on_shutdown;
   {
-    RWLock::RLocker locker(m_lock);
+    std::shared_lock locker{m_lock};
     if (--m_in_flight_ios > 0 || !m_shutdown) {
       return;
     }
@@ -877,14 +878,14 @@ void ImageRequestWQ<I>::fail_in_flight_io(
 
 template <typename I>
 bool ImageRequestWQ<I>::is_lock_required(bool write_op) const {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   return ((write_op && m_require_lock_on_write) ||
           (!write_op && m_require_lock_on_read));
 }
 
 template <typename I>
 void ImageRequestWQ<I>::queue(ImageDispatchSpec<I> *req) {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << "ictx=" << &m_image_ctx << ", "
@@ -941,7 +942,7 @@ template <typename I>
 void ImageRequestWQ<I>::handle_blocked_writes(int r) {
   Contexts contexts;
   {
-    RWLock::WLocker locker(m_lock);
+    std::unique_lock locker{m_lock};
     contexts.swap(m_write_blocker_contexts);
   }
 
index a9134fc4a1bf58659d4fd3ae524e37a539b4a7c2..daa596330d82c1cbb3473381ad2daa7c74e784f1 100644 (file)
@@ -5,7 +5,7 @@
 #define CEPH_LIBRBD_IO_IMAGE_REQUEST_WQ_H
 
 #include "include/Context.h"
-#include "common/RWLock.h"
+#include "common/ceph_mutex.h"
 #include "common/Throttle.h"
 #include "common/WorkQueue.h"
 #include "librbd/io/Types.h"
@@ -62,7 +62,7 @@ public:
   void shut_down(Context *on_shutdown);
 
   inline bool writes_blocked() const {
-    RWLock::RLocker locker(m_lock);
+    std::shared_lock locker{m_lock};
     return (m_write_blockers > 0);
   }
 
@@ -94,7 +94,7 @@ private:
   struct C_RefreshFinish;
 
   ImageCtxT &m_image_ctx;
-  mutable RWLock m_lock;
+  mutable ceph::shared_mutex m_lock;
   Contexts m_write_blocker_contexts;
   uint32_t m_write_blockers = 0;
   Contexts m_unblocked_write_waiter_contexts;
@@ -116,11 +116,11 @@ private:
   bool is_lock_required(bool write_op) const;
 
   inline bool require_lock_on_read() const {
-    RWLock::RLocker locker(m_lock);
+    std::shared_lock locker{m_lock};
     return m_require_lock_on_read;
   }
   inline bool writes_empty() const {
-    RWLock::RLocker locker(m_lock);
+    std::shared_lock locker{m_lock};
     return (m_queued_writes == 0);
   }
 
index 5e5d482ff59a00567695556aada949614bff795a..513f8adf43dc929b4857ab9fb53a61ba26e9e8fa 100644 (file)
@@ -34,11 +34,11 @@ struct ObjectDispatcher<I>::C_LayerIterator : public Context {
 
   void complete(int r) override {
     while (true) {
-      object_dispatcher->m_lock.get_read();
+      object_dispatcher->m_lock.lock_shared();
       auto it = object_dispatcher->m_object_dispatches.upper_bound(
         object_dispatch_layer);
       if (it == object_dispatcher->m_object_dispatches.end()) {
-        object_dispatcher->m_lock.put_read();
+        object_dispatcher->m_lock.unlock_shared();
         Context::complete(r);
         return;
       }
@@ -48,7 +48,7 @@ struct ObjectDispatcher<I>::C_LayerIterator : public Context {
 
       // prevent recursive locking back into the dispatcher while handling IO
       object_dispatch_meta.async_op_tracker->start_op();
-      object_dispatcher->m_lock.put_read();
+      object_dispatcher->m_lock.unlock_shared();
 
       // next loop should start after current layer
       object_dispatch_layer = object_dispatch->get_object_dispatch_layer();
@@ -175,8 +175,9 @@ struct ObjectDispatcher<I>::SendVisitor : public boost::static_visitor<bool> {
 template <typename I>
 ObjectDispatcher<I>::ObjectDispatcher(I* image_ctx)
   : m_image_ctx(image_ctx),
-    m_lock(librbd::util::unique_lock_name("librbd::io::ObjectDispatcher::lock",
-                                          this)) {
+    m_lock(ceph::make_shared_mutex(
+      librbd::util::unique_lock_name("librbd::io::ObjectDispatcher::lock",
+                                    this))) {
   // configure the core object dispatch handler on startup
   auto object_dispatch = new ObjectDispatch(image_ctx);
   m_object_dispatches[object_dispatch->get_object_dispatch_layer()] =
@@ -195,7 +196,7 @@ void ObjectDispatcher<I>::shut_down(Context* on_finish) {
 
   std::map<ObjectDispatchLayer, ObjectDispatchMeta> object_dispatches;
   {
-    RWLock::WLocker locker(m_lock);
+    std::unique_lock locker{m_lock};
     std::swap(object_dispatches, m_object_dispatches);
   }
 
@@ -212,7 +213,7 @@ void ObjectDispatcher<I>::register_object_dispatch(
   auto type = object_dispatch->get_object_dispatch_layer();
   ldout(cct, 5) << "object_dispatch_layer=" << type << dendl;
 
-  RWLock::WLocker locker(m_lock);
+  std::unique_lock locker{m_lock};
   ceph_assert(type < OBJECT_DISPATCH_LAYER_LAST);
 
   auto result = m_object_dispatches.insert(
@@ -229,7 +230,7 @@ void ObjectDispatcher<I>::shut_down_object_dispatch(
 
   ObjectDispatchMeta object_dispatch_meta;
   {
-    RWLock::WLocker locker(m_lock);
+    std::unique_lock locker{m_lock};
     auto it = m_object_dispatches.find(object_dispatch_layer);
     ceph_assert(it != m_object_dispatches.end());
 
@@ -310,13 +311,13 @@ void ObjectDispatcher<I>::send(ObjectDispatchSpec* object_dispatch_spec) {
   // apply the IO request to all layers -- this method will be re-invoked
   // by the dispatch layer if continuing / restarting the IO
   while (true) {
-    m_lock.get_read();
+    m_lock.lock_shared();
     object_dispatch_layer = object_dispatch_spec->object_dispatch_layer;
     auto it = m_object_dispatches.upper_bound(object_dispatch_layer);
     if (it == m_object_dispatches.end()) {
       // the request is complete if handled by all layers
       object_dispatch_spec->dispatch_result = DISPATCH_RESULT_COMPLETE;
-      m_lock.put_read();
+      m_lock.unlock_shared();
       break;
     }
 
@@ -326,7 +327,7 @@ void ObjectDispatcher<I>::send(ObjectDispatchSpec* object_dispatch_spec) {
 
     // prevent recursive locking back into the dispatcher while handling IO
     object_dispatch_meta.async_op_tracker->start_op();
-    m_lock.put_read();
+    m_lock.unlock_shared();
 
     // advance to next layer in case we skip or continue
     object_dispatch_spec->object_dispatch_layer =
index 0370d2684f32568e2bdd4179725e5055ecab9904..c949a97573fb83a4f6e66de664be4c4f17f35844 100644 (file)
@@ -5,7 +5,7 @@
 #define CEPH_LIBRBD_IO_OBJECT_DISPATCHER_H
 
 #include "include/int_types.h"
-#include "common/RWLock.h"
+#include "common/ceph_mutex.h"
 #include "librbd/io/Types.h"
 #include <map>
 
@@ -71,7 +71,7 @@ private:
 
   ImageCtxT* m_image_ctx;
 
-  RWLock m_lock;
+  ceph::shared_mutex m_lock;
   std::map<ObjectDispatchLayer, ObjectDispatchMeta> m_object_dispatches;
 
   void send(ObjectDispatchSpec* object_dispatch_spec);
index 1c3ac150885acce7a66652d91a77c1827b5afdcd..b8845f6b7f4e20b6028ed7cb9e8e961dab883056 100644 (file)
@@ -5,8 +5,7 @@
 #include "common/ceph_context.h"
 #include "common/dout.h"
 #include "common/errno.h"
-#include "common/Mutex.h"
-#include "common/RWLock.h"
+#include "common/ceph_mutex.h"
 #include "common/WorkQueue.h"
 #include "include/Context.h"
 #include "include/err.h"
@@ -40,7 +39,7 @@ namespace {
 
 template <typename I>
 inline bool is_copy_on_read(I *ictx, librados::snap_t snap_id) {
-  RWLock::RLocker image_locker(ictx->image_lock);
+  std::shared_lock image_locker{ictx->image_lock};
   return (ictx->clone_copy_on_read &&
           !ictx->read_only && snap_id == CEPH_NOSNAP &&
           (ictx->exclusive_lock == nullptr ||
@@ -123,7 +122,7 @@ void ObjectRequest<I>::add_write_hint(I& image_ctx,
 template <typename I>
 bool ObjectRequest<I>::compute_parent_extents(Extents *parent_extents,
                                               bool read_request) {
-  ceph_assert(m_ictx->image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_ictx->image_lock));
 
   m_has_parent = false;
   parent_extents->clear();
@@ -197,7 +196,7 @@ template <typename I>
 void ObjectReadRequest<I>::read_object() {
   I *image_ctx = this->m_ictx;
   {
-    RWLock::RLocker image_locker(image_ctx->image_lock);
+    std::shared_lock image_locker{image_ctx->image_lock};
     if (image_ctx->object_map != nullptr &&
         !image_ctx->object_map->object_may_exist(this->m_object_no)) {
       image_ctx->op_work_queue->queue(new FunctionContext([this](int r) {
@@ -252,7 +251,7 @@ template <typename I>
 void ObjectReadRequest<I>::read_parent() {
   I *image_ctx = this->m_ictx;
 
-  RWLock::RLocker image_locker(image_ctx->image_lock);
+  std::shared_lock image_locker{image_ctx->image_lock};
 
   // calculate reverse mapping onto the image
   Extents parent_extents;
@@ -311,21 +310,21 @@ void ObjectReadRequest<I>::copyup() {
     return;
   }
 
-  image_ctx->owner_lock.get_read();
-  image_ctx->image_lock.get_read();
+  image_ctx->owner_lock.lock_shared();
+  image_ctx->image_lock.lock_shared();
   Extents parent_extents;
   if (!this->compute_parent_extents(&parent_extents, true) ||
       (image_ctx->exclusive_lock != nullptr &&
        !image_ctx->exclusive_lock->is_lock_owner())) {
-    image_ctx->image_lock.put_read();
-    image_ctx->owner_lock.put_read();
+    image_ctx->image_lock.unlock_shared();
+    image_ctx->owner_lock.unlock_shared();
     this->finish(0);
     return;
   }
 
   ldout(image_ctx->cct, 20) << dendl;
 
-  image_ctx->copyup_list_lock.Lock();
+  image_ctx->copyup_list_lock.lock();
   auto it = image_ctx->copyup_list.find(this->m_object_no);
   if (it == image_ctx->copyup_list.end()) {
     // create and kick off a CopyupRequest
@@ -333,15 +332,15 @@ void ObjectReadRequest<I>::copyup() {
       image_ctx, this->m_object_no, std::move(parent_extents), this->m_trace);
 
     image_ctx->copyup_list[this->m_object_no] = new_req;
-    image_ctx->copyup_list_lock.Unlock();
-    image_ctx->image_lock.put_read();
+    image_ctx->copyup_list_lock.unlock();
+    image_ctx->image_lock.unlock_shared();
     new_req->send();
   } else {
-    image_ctx->copyup_list_lock.Unlock();
-    image_ctx->image_lock.put_read();
+    image_ctx->copyup_list_lock.unlock();
+    image_ctx->image_lock.unlock_shared();
   }
 
-  image_ctx->owner_lock.put_read();
+  image_ctx->owner_lock.unlock_shared();
   this->finish(0);
 }
 
@@ -365,17 +364,17 @@ AbstractObjectWriteRequest<I>::AbstractObjectWriteRequest(
 
   compute_parent_info();
 
-  ictx->image_lock.get_read();
+  ictx->image_lock.lock_shared();
   if (!ictx->migration_info.empty()) {
     m_guarding_migration_write = true;
   }
-  ictx->image_lock.put_read();
+  ictx->image_lock.unlock_shared();
 }
 
 template <typename I>
 void AbstractObjectWriteRequest<I>::compute_parent_info() {
   I *image_ctx = this->m_ictx;
-  RWLock::RLocker image_locker(image_ctx->image_lock);
+  std::shared_lock image_locker{image_ctx->image_lock};
 
   this->compute_parent_extents(&m_parent_extents, false);
 
@@ -389,7 +388,7 @@ template <typename I>
 void AbstractObjectWriteRequest<I>::add_write_hint(
     librados::ObjectWriteOperation *wr) {
   I *image_ctx = this->m_ictx;
-  RWLock::RLocker image_locker(image_ctx->image_lock);
+  std::shared_lock image_locker{image_ctx->image_lock};
   if (image_ctx->object_map == nullptr || !this->m_object_may_exist) {
     ObjectRequest<I>::add_write_hint(*image_ctx, wr);
   }
@@ -402,7 +401,7 @@ void AbstractObjectWriteRequest<I>::send() {
                             << this->m_object_off << "~" << this->m_object_len
                             << dendl;
   {
-    RWLock::RLocker image_lock(image_ctx->image_lock);
+    std::shared_lock image_lock{image_ctx->image_lock};
     if (image_ctx->object_map == nullptr) {
       m_object_may_exist = true;
     } else {
@@ -427,16 +426,16 @@ template <typename I>
 void AbstractObjectWriteRequest<I>::pre_write_object_map_update() {
   I *image_ctx = this->m_ictx;
 
-  image_ctx->image_lock.get_read();
+  image_ctx->image_lock.lock_shared();
   if (image_ctx->object_map == nullptr || !is_object_map_update_enabled()) {
-    image_ctx->image_lock.put_read();
+    image_ctx->image_lock.unlock_shared();
     write_object();
     return;
   }
 
   if (!m_object_may_exist && m_copyup_enabled) {
     // optimization: copyup required
-    image_ctx->image_lock.put_read();
+    image_ctx->image_lock.unlock_shared();
     copyup();
     return;
   }
@@ -450,11 +449,11 @@ void AbstractObjectWriteRequest<I>::pre_write_object_map_update() {
         &AbstractObjectWriteRequest<I>::handle_pre_write_object_map_update>(
           CEPH_NOSNAP, this->m_object_no, new_state, {}, this->m_trace, false,
           this)) {
-    image_ctx->image_lock.put_read();
+    image_ctx->image_lock.unlock_shared();
     return;
   }
 
-  image_ctx->image_lock.put_read();
+  image_ctx->image_lock.unlock_shared();
   write_object();
 }
 
@@ -515,9 +514,9 @@ void AbstractObjectWriteRequest<I>::handle_write_object(int r) {
       return;
     }
   } else if (r == -ERANGE && m_guarding_migration_write) {
-    image_ctx->image_lock.get_read();
+    image_ctx->image_lock.lock_shared();
     m_guarding_migration_write = !image_ctx->migration_info.empty();
-    image_ctx->image_lock.put_read();
+    image_ctx->image_lock.unlock_shared();
 
     if (m_guarding_migration_write) {
       copyup();
@@ -549,7 +548,7 @@ void AbstractObjectWriteRequest<I>::copyup() {
   ceph_assert(!m_copyup_in_progress);
   m_copyup_in_progress = true;
 
-  image_ctx->copyup_list_lock.Lock();
+  image_ctx->copyup_list_lock.lock();
   auto it = image_ctx->copyup_list.find(this->m_object_no);
   if (it == image_ctx->copyup_list.end()) {
     auto new_req = CopyupRequest<I>::create(
@@ -561,11 +560,11 @@ void AbstractObjectWriteRequest<I>::copyup() {
     new_req->append_request(this);
     image_ctx->copyup_list[this->m_object_no] = new_req;
 
-    image_ctx->copyup_list_lock.Unlock();
+    image_ctx->copyup_list_lock.unlock();
     new_req->send();
   } else {
     it->second->append_request(this);
-    image_ctx->copyup_list_lock.Unlock();
+    image_ctx->copyup_list_lock.unlock();
   }
 }
 
@@ -596,10 +595,10 @@ template <typename I>
 void AbstractObjectWriteRequest<I>::post_write_object_map_update() {
   I *image_ctx = this->m_ictx;
 
-  image_ctx->image_lock.get_read();
+  image_ctx->image_lock.lock_shared();
   if (image_ctx->object_map == nullptr || !is_object_map_update_enabled() ||
       !is_non_existent_post_write_object_map_state()) {
-    image_ctx->image_lock.put_read();
+    image_ctx->image_lock.unlock_shared();
     this->finish(0);
     return;
   }
@@ -613,11 +612,11 @@ void AbstractObjectWriteRequest<I>::post_write_object_map_update() {
         &AbstractObjectWriteRequest<I>::handle_post_write_object_map_update>(
           CEPH_NOSNAP, this->m_object_no, OBJECT_NONEXISTENT, OBJECT_PENDING,
           this->m_trace, false, this)) {
-    image_ctx->image_lock.put_read();
+    image_ctx->image_lock.unlock_shared();
     return;
   }
 
-  image_ctx->image_lock.put_read();
+  image_ctx->image_lock.unlock_shared();
   this->finish(0);
 }
 
index 6e1632c049cd67067c3a028412922351ac1c13ef..1abb8e3109ec6400075c4d01979d75bde19dd210 100644 (file)
@@ -140,7 +140,7 @@ void SimpleSchedulerObjectDispatch<I>::ObjectRequests::try_merge_delayed_request
 
 template <typename I>
 void SimpleSchedulerObjectDispatch<I>::ObjectRequests::dispatch_delayed_requests(
-    I *image_ctx, LatencyStats *latency_stats, Mutex *latency_stats_lock) {
+    I *image_ctx, LatencyStats *latency_stats, ceph::mutex *latency_stats_lock) {
   for (auto &it : m_delayed_requests) {
     auto offset = it.first;
     auto &merged_requests = it.second;
@@ -149,7 +149,7 @@ void SimpleSchedulerObjectDispatch<I>::ObjectRequests::dispatch_delayed_requests
         [requests=std::move(merged_requests.requests), latency_stats,
          latency_stats_lock, start_time=ceph_clock_now()](int r) {
           if (latency_stats) {
-            Mutex::Locker locker(*latency_stats_lock);
+           std::lock_guard locker{*latency_stats_lock};
             auto latency = ceph_clock_now() - start_time;
             latency_stats->add(latency.to_nsec());
           }
@@ -167,15 +167,15 @@ void SimpleSchedulerObjectDispatch<I>::ObjectRequests::dispatch_delayed_requests
     req->send();
   }
 
-  m_dispatch_time = utime_t();
+  m_dispatch_time = {};
 }
 
 template <typename I>
 SimpleSchedulerObjectDispatch<I>::SimpleSchedulerObjectDispatch(
     I* image_ctx)
   : m_image_ctx(image_ctx),
-    m_lock(librbd::util::unique_lock_name(
-      "librbd::io::SimpleSchedulerObjectDispatch::lock", this)),
+    m_lock(ceph::make_mutex(librbd::util::unique_lock_name(
+      "librbd::io::SimpleSchedulerObjectDispatch::lock", this))),
     m_max_delay(image_ctx->config.template get_val<uint64_t>(
       "rbd_io_scheduler_simple_max_delay")) {
   CephContext *cct = m_image_ctx->cct;
@@ -220,7 +220,7 @@ bool SimpleSchedulerObjectDispatch<I>::read(
   ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                  << object_off << "~" << object_len << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   if (intersects(object_no, object_off, object_len)) {
     dispatch_delayed_requests(object_no);
   }
@@ -239,7 +239,7 @@ bool SimpleSchedulerObjectDispatch<I>::discard(
   ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                  << object_off << "~" << object_len << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   dispatch_delayed_requests(object_no);
   register_in_flight_request(object_no, {}, on_finish);
 
@@ -257,7 +257,7 @@ bool SimpleSchedulerObjectDispatch<I>::write(
   ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                  << object_off << "~" << data.length() << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   if (try_delay_write(object_no, object_off, std::move(data), snapc, op_flags,
                       *object_dispatch_flags, on_dispatched)) {
     *dispatch_result = DISPATCH_RESULT_COMPLETE;
@@ -282,7 +282,7 @@ bool SimpleSchedulerObjectDispatch<I>::write_same(
   ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                  << object_off << "~" << object_len << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   dispatch_delayed_requests(object_no);
   register_in_flight_request(object_no, {}, on_finish);
 
@@ -301,7 +301,7 @@ bool SimpleSchedulerObjectDispatch<I>::compare_and_write(
   ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                  << object_off << "~" << cmp_data.length() << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   dispatch_delayed_requests(object_no);
   register_in_flight_request(object_no, {}, on_finish);
 
@@ -316,7 +316,7 @@ bool SimpleSchedulerObjectDispatch<I>::flush(
   auto cct = m_image_ctx->cct;
   ldout(cct, 20) << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   dispatch_all_delayed_requests();
 
   return false;
@@ -325,7 +325,7 @@ bool SimpleSchedulerObjectDispatch<I>::flush(
 template <typename I>
 bool SimpleSchedulerObjectDispatch<I>::intersects(
     uint64_t object_no, uint64_t object_off, uint64_t len) const {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   auto cct = m_image_ctx->cct;
 
   auto it = m_requests.find(object_no);
@@ -342,7 +342,7 @@ bool SimpleSchedulerObjectDispatch<I>::try_delay_write(
     uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
     const ::SnapContext &snapc, int op_flags, int object_dispatch_flags,
     Context* on_dispatched) {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   auto cct = m_image_ctx->cct;
 
   if (m_latency_stats && !m_latency_stats->is_ready()) {
@@ -365,11 +365,11 @@ bool SimpleSchedulerObjectDispatch<I>::try_delay_write(
 
   // schedule dispatch on the first request added
   if (delayed && !object_requests->is_scheduled_dispatch()) {
-    auto dispatch_time = ceph_clock_now();
+    auto dispatch_time = ceph::real_clock::now();
     if (m_latency_stats) {
-      dispatch_time += utime_t(0, m_latency_stats->avg() / 2);
+      dispatch_time += std::chrono::nanoseconds(m_latency_stats->avg() / 2);
     } else {
-      dispatch_time += utime_t(0, m_max_delay * 1000000);
+      dispatch_time += std::chrono::milliseconds(m_max_delay);
     }
     object_requests->set_scheduled_dispatch(dispatch_time);
     m_dispatch_queue.push_back(object_requests);
@@ -383,7 +383,7 @@ bool SimpleSchedulerObjectDispatch<I>::try_delay_write(
 
 template <typename I>
 void SimpleSchedulerObjectDispatch<I>::dispatch_all_delayed_requests() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   auto cct = m_image_ctx->cct;
   ldout(cct, 20) << dendl;
 
@@ -408,7 +408,7 @@ void SimpleSchedulerObjectDispatch<I>::register_in_flight_request(
     [this, object_no, dispatch_seq, start_time, ctx=*on_finish](int r) {
       ctx->complete(r);
 
-      Mutex::Locker locker(m_lock);
+      std::lock_guard locker{m_lock};
       if (m_latency_stats && start_time != utime_t()) {
         auto latency = ceph_clock_now() - start_time;
         m_latency_stats->add(latency.to_nsec());
@@ -427,7 +427,7 @@ void SimpleSchedulerObjectDispatch<I>::register_in_flight_request(
 template <typename I>
 void SimpleSchedulerObjectDispatch<I>::dispatch_delayed_requests(
     uint64_t object_no) {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   auto cct = m_image_ctx->cct;
 
   auto it = m_requests.find(object_no);
@@ -443,7 +443,7 @@ void SimpleSchedulerObjectDispatch<I>::dispatch_delayed_requests(
 template <typename I>
 void SimpleSchedulerObjectDispatch<I>::dispatch_delayed_requests(
     ObjectRequestsRef object_requests) {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   auto cct = m_image_ctx->cct;
 
   ldout(cct, 20) << "object_no=" << object_requests->get_object_no() << ", "
@@ -467,10 +467,10 @@ void SimpleSchedulerObjectDispatch<I>::dispatch_delayed_requests(
 
 template <typename I>
 void SimpleSchedulerObjectDispatch<I>::schedule_dispatch_delayed_requests() {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   auto cct = m_image_ctx->cct;
 
-  Mutex::Locker timer_locker(*m_timer_lock);
+  std::lock_guard timer_locker{*m_timer_lock};
 
   if (m_timer_task != nullptr) {
     ldout(cct, 20) << "canceling task " << m_timer_task << dendl;
@@ -500,7 +500,7 @@ void SimpleSchedulerObjectDispatch<I>::schedule_dispatch_delayed_requests() {
 
   m_timer_task = new FunctionContext(
     [this, object_no=object_requests->get_object_no()](int r) {
-      ceph_assert(m_timer_lock->is_locked());
+      ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
       auto cct = m_image_ctx->cct;
       ldout(cct, 20) << "running timer task " << m_timer_task << dendl;
 
@@ -508,7 +508,7 @@ void SimpleSchedulerObjectDispatch<I>::schedule_dispatch_delayed_requests() {
       m_image_ctx->op_work_queue->queue(
           new FunctionContext(
             [this, object_no](int r) {
-              Mutex::Locker locker(m_lock);
+             std::lock_guard locker{m_lock};
               dispatch_delayed_requests(object_no);
             }), 0);
     });
index 90f7747173e4cca299f7c671b2a6315e0c4379f4..e5a88371d7f3a19f2f1d34b9690adbbb05e2cca6 100644 (file)
@@ -4,7 +4,7 @@
 #ifndef CEPH_LIBRBD_IO_SIMPLE_SCHEDULER_OBJECT_DISPATCH_H
 #define CEPH_LIBRBD_IO_SIMPLE_SCHEDULER_OBJECT_DISPATCH_H
 
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "common/snap_types.h"
 #include "include/interval_set.h"
 #include "include/utime.h"
@@ -111,6 +111,8 @@ private:
 
   class ObjectRequests {
   public:
+    using clock_t = ceph::real_clock;
+
     ObjectRequests(uint64_t object_no) : m_object_no(object_no) {
     }
 
@@ -126,16 +128,16 @@ private:
       return m_dispatch_seq;
     }
 
-    utime_t get_dispatch_time() const {
+    clock_t::time_point get_dispatch_time() const {
       return m_dispatch_time;
     }
 
-    void set_scheduled_dispatch(const utime_t &dispatch_time) {
+    void set_scheduled_dispatch(const clock_t::time_point &dispatch_time) {
       m_dispatch_time = dispatch_time;
     }
 
     bool is_scheduled_dispatch() const {
-      return m_dispatch_time != utime_t();
+      return !clock_t::is_zero(m_dispatch_time);
     }
 
     size_t delayed_requests_size() const {
@@ -152,12 +154,12 @@ private:
 
     void dispatch_delayed_requests(ImageCtxT *image_ctx,
                                    LatencyStats *latency_stats,
-                                   Mutex *latency_stats_lock);
+                                   ceph::mutex *latency_stats_lock);
 
   private:
     uint64_t m_object_no;
     uint64_t m_dispatch_seq = 0;
-    utime_t m_dispatch_time;
+    clock_t::time_point m_dispatch_time;
     SnapContext m_snapc = {0, {}};
     int m_op_flags = 0;
     int m_object_dispatch_flags = 0;
@@ -174,9 +176,9 @@ private:
 
   ImageCtxT *m_image_ctx;
 
-  Mutex m_lock;
+  ceph::mutex m_lock;
   SafeTimer *m_timer;
-  Mutex *m_timer_lock;
+  ceph::mutex *m_timer_lock;
   uint64_t m_max_delay;
   uint64_t m_dispatch_seq = 0;
 
index c2b8cd9cf796e7d14366b5883c532edc30626877..8a4e40f88c442d37bdfae5653f87df37f8e987ba 100644 (file)
@@ -8,7 +8,7 @@
 #include "include/buffer.h"
 #include "include/rados/librados.hpp"
 #include "include/rbd/librbd.hpp"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "librbd/ImageCtx.h"
 #include "journal/Journaler.h"
 #include "librbd/journal/Types.h"
@@ -73,7 +73,7 @@ private:
   bufferlist m_bl;
   Journaler *m_journaler;
   SafeTimer *m_timer;
-  Mutex *m_timer_lock;
+  ceph::mutex *m_timer_lock;
   int m_r_saved;
 
   int64_t m_pool_id = -1;
index 7656caac80909fc03c418799f9180b7ea4647f9e..649388266b08c6c7eb2cceb3434e7eeebb705cfb 100644 (file)
@@ -26,7 +26,7 @@ using librbd::util::create_context_callback;
 template <typename I>
 DemoteRequest<I>::DemoteRequest(I &image_ctx, Context *on_finish)
   : m_image_ctx(image_ctx), m_on_finish(on_finish),
-    m_lock("DemoteRequest::m_lock") {
+    m_lock(ceph::make_mutex("DemoteRequest::m_lock")) {
 }
 
 template <typename I>
index 5fea7f47b301cbdbeb92d990b4f1c971818b8372..6aba6cc8f473001063fd1c52b4d460b0ab213986 100644 (file)
@@ -4,7 +4,7 @@
 #ifndef CEPH_LIBRBD_JOURNAL_DEMOTE_REQUEST_H
 #define CEPH_LIBRBD_JOURNAL_DEMOTE_REQUEST_H
 
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "cls/journal/cls_journal_types.h"
 #include "journal/Future.h"
 #include "librbd/journal/Types.h"
@@ -69,7 +69,7 @@ private:
   Journaler *m_journaler = nullptr;
   int m_ret_val = 0;
 
-  Mutex m_lock;
+  ceph::mutex m_lock;
   ImageClientMeta m_client_meta;
   uint64_t m_tag_tid = 0;
   TagData m_tag_data;
index a5178de804c49bd9909c27e1ec6f3690d373374c..e0cde38d0c3542ee9e3a362e75584dac5b5e735d 100644 (file)
@@ -25,7 +25,7 @@ using librbd::util::create_context_callback;
 using util::C_DecodeTags;
 
 template <typename I>
-OpenRequest<I>::OpenRequest(I *image_ctx, Journaler *journaler, Mutex *lock,
+OpenRequest<I>::OpenRequest(I *image_ctx, Journaler *journaler, ceph::mutex *lock,
                             journal::ImageClientMeta *client_meta,
                             uint64_t *tag_tid, journal::TagData *tag_data,
                             Context *on_finish)
@@ -97,7 +97,7 @@ void OpenRequest<I>::handle_init(int r) {
 
   m_tag_class = image_client_meta->tag_class;
   {
-    Mutex::Locker locker(*m_lock);
+    std::lock_guard locker{*m_lock};
     *m_client_meta = *image_client_meta;
   }
 
index f71d8c536768870e1aaf79124ecf1101e82316b2..0f10bccf1ade46d5319f40354928f65e338d42a6 100644 (file)
@@ -4,11 +4,11 @@
 #ifndef CEPH_LIBRBD_JOURNAL_OPEN_REQUEST_H
 #define CEPH_LIBRBD_JOURNAL_OPEN_REQUEST_H
 
+#include "common/ceph_mutex.h"
 #include "include/int_types.h"
 #include "librbd/journal/TypeTraits.h"
 
 struct Context;
-struct Mutex;
 
 namespace librbd {
 
@@ -25,14 +25,14 @@ public:
   typedef typename TypeTraits<ImageCtxT>::Journaler Journaler;
 
   static OpenRequest* create(ImageCtxT *image_ctx, Journaler *journaler,
-                             Mutex *lock, journal::ImageClientMeta *client_meta,
+                             ceph::mutex *lock, journal::ImageClientMeta *client_meta,
                              uint64_t *tag_tid, journal::TagData *tag_data,
                              Context *on_finish) {
     return new OpenRequest(image_ctx, journaler, lock, client_meta, tag_tid,
                            tag_data, on_finish);
   }
 
-  OpenRequest(ImageCtxT *image_ctx, Journaler *journaler, Mutex *lock,
+  OpenRequest(ImageCtxT *image_ctx, Journaler *journaler, ceph::mutex *lock,
               journal::ImageClientMeta *client_meta, uint64_t *tag_tid,
               journal::TagData *tag_data, Context *on_finish);
 
@@ -59,7 +59,7 @@ private:
 
   ImageCtxT *m_image_ctx;
   Journaler *m_journaler;
-  Mutex *m_lock;
+  ceph::mutex *m_lock;
   journal::ImageClientMeta *m_client_meta;
   uint64_t *m_tag_tid;
   journal::TagData *m_tag_data;
index 22dc83a32d860ddb28c2212f9646813b5f535ab5..703a8a075bc06d0ba0120d02968d9459ddc51a2e 100644 (file)
@@ -26,7 +26,7 @@ using librbd::util::create_context_callback;
 template <typename I>
 PromoteRequest<I>::PromoteRequest(I *image_ctx, bool force, Context *on_finish)
   : m_image_ctx(image_ctx), m_force(force), m_on_finish(on_finish),
-    m_lock("PromoteRequest::m_lock") {
+    m_lock(ceph::make_mutex("PromoteRequest::m_lock")) {
 }
 
 template <typename I>
index 0d01f596108d87502bfc9794b23d77bd1cfe2d3e..f6258066e7e0322cd8aa6b8ebe8202b9097052dc 100644 (file)
@@ -5,7 +5,7 @@
 #define CEPH_LIBRBD_JOURNAL_PROMOTE_REQUEST_H
 
 #include "include/int_types.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "cls/journal/cls_journal_types.h"
 #include "journal/Future.h"
 #include "librbd/journal/Types.h"
@@ -71,7 +71,7 @@ private:
   Journaler *m_journaler = nullptr;
   int m_ret_val = 0;
 
-  Mutex m_lock;
+  ceph::mutex m_lock;
   ImageClientMeta m_client_meta;
   uint64_t m_tag_tid = 0;
   TagData m_tag_data;
index 594e62d5eb89a6c44bd432b2229c581ab8c89b65..13dff87a08e7e548675eb2ad7781e6564d7ef2e7 100644 (file)
@@ -8,7 +8,6 @@
 #include "include/buffer.h"
 #include "include/rados/librados.hpp"
 #include "include/rbd/librbd.hpp"
-#include "common/Mutex.h"
 #include "librbd/ImageCtx.h"
 #include "journal/Journaler.h"
 #include "librbd/journal/TypeTraits.h"
@@ -58,7 +57,7 @@ private:
   CephContext *m_cct;
   Journaler *m_journaler;
   SafeTimer *m_timer;
-  Mutex *m_timer_lock;
+  ceph::mutex *m_timer_lock;
   int m_r_saved;
 
   void stat_journal();
index 8f850ab5369ac7d906cd5f56f1e58609f40d6cda..d2d71754559034ce1bcdc48521fd044f41082f1e 100644 (file)
@@ -118,7 +118,7 @@ struct ExecuteOp : public Context {
     }
 
     ldout(cct, 20) << ": ExecuteOp::" << __func__ << dendl;
-    RWLock::RLocker owner_locker(image_ctx.owner_lock);
+    std::shared_lock owner_locker{image_ctx.owner_lock};
 
     if (image_ctx.exclusive_lock == nullptr ||
         !image_ctx.exclusive_lock->accept_ops()) {
@@ -173,7 +173,7 @@ struct C_RefreshIfRequired : public Context {
 
 template <typename I>
 Replay<I>::Replay(I &image_ctx)
-  : m_image_ctx(image_ctx), m_lock("Replay<I>::m_lock") {
+  : m_image_ctx(image_ctx) {
 }
 
 template <typename I>
@@ -206,7 +206,7 @@ void Replay<I>::process(const EventEntry &event_entry,
 
   on_ready = util::create_async_context_callback(m_image_ctx, on_ready);
 
-  RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+  std::shared_lock owner_lock{m_image_ctx.owner_lock};
   if (m_image_ctx.exclusive_lock == nullptr ||
       !m_image_ctx.exclusive_lock->accept_ops()) {
     ldout(cct, 5) << ": lost exclusive lock -- skipping event" << dendl;
@@ -229,7 +229,7 @@ void Replay<I>::shut_down(bool cancel_ops, Context *on_finish) {
     m_image_ctx, on_finish);
 
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
 
     // safely commit any remaining AIO modify operations
     if ((m_in_flight_aio_flush + m_in_flight_aio_modify) != 0) {
@@ -270,7 +270,7 @@ void Replay<I>::shut_down(bool cancel_ops, Context *on_finish) {
 
   // execute the following outside of lock scope
   if (flush_comp != nullptr) {
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+    std::shared_lock owner_locker{m_image_ctx.owner_lock};
     io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
                                    io::FLUSH_SOURCE_INTERNAL, {});
   }
@@ -283,7 +283,7 @@ template <typename I>
 void Replay<I>::flush(Context *on_finish) {
   io::AioCompletion *aio_comp;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     aio_comp = create_aio_flush_completion(
       util::create_async_context_callback(m_image_ctx, on_finish));
     if (aio_comp == nullptr) {
@@ -291,7 +291,7 @@ void Replay<I>::flush(Context *on_finish) {
     }
   }
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   io::ImageRequest<I>::aio_flush(&m_image_ctx, aio_comp,
                                  io::FLUSH_SOURCE_INTERNAL, {});
 }
@@ -301,7 +301,7 @@ void Replay<I>::replay_op_ready(uint64_t op_tid, Context *on_resume) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": op_tid=" << op_tid << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   auto op_it = m_op_events.find(op_tid);
   ceph_assert(op_it != m_op_events.end());
 
@@ -357,9 +357,9 @@ void Replay<I>::handle_event(const journal::AioDiscardEvent &event,
   }
 
   if (flush_required) {
-    m_lock.Lock();
+    m_lock.lock();
     auto flush_comp = create_aio_flush_completion(nullptr);
-    m_lock.Unlock();
+    m_lock.unlock();
 
     if (flush_comp != nullptr) {
       io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
@@ -391,9 +391,9 @@ void Replay<I>::handle_event(const journal::AioWriteEvent &event,
   }
 
   if (flush_required) {
-    m_lock.Lock();
+    m_lock.lock();
     auto flush_comp = create_aio_flush_completion(nullptr);
-    m_lock.Unlock();
+    m_lock.unlock();
 
     if (flush_comp != nullptr) {
       io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
@@ -410,7 +410,7 @@ void Replay<I>::handle_event(const journal::AioFlushEvent &event,
 
   io::AioCompletion *aio_comp;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     aio_comp = create_aio_flush_completion(on_safe);
   }
 
@@ -444,9 +444,9 @@ void Replay<I>::handle_event(const journal::AioWriteSameEvent &event,
   }
 
   if (flush_required) {
-    m_lock.Lock();
+    m_lock.lock();
     auto flush_comp = create_aio_flush_completion(nullptr);
-    m_lock.Unlock();
+    m_lock.unlock();
 
     if (flush_comp != nullptr) {
       io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
@@ -478,9 +478,9 @@ void Replay<I>::handle_event(const journal::AioWriteSameEvent &event,
   }
 
   if (flush_required) {
-    m_lock.Lock();
+    m_lock.lock();
     auto flush_comp = create_aio_flush_completion(nullptr);
-    m_lock.Unlock();
+    m_lock.unlock();
 
     io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
                                    io::FLUSH_SOURCE_INTERNAL, {});
@@ -499,7 +499,7 @@ void Replay<I>::handle_event(const journal::OpFinishEvent &event,
   Context *on_op_complete = nullptr;
   Context *on_op_finish_event = nullptr;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     auto op_it = m_op_events.find(event.op_tid);
     if (op_it == m_op_events.end()) {
       ldout(cct, 10) << ": unable to locate associated op: assuming previously "
@@ -548,7 +548,7 @@ void Replay<I>::handle_event(const journal::SnapCreateEvent &event,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": Snap create event" << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   OpEvent *op_event;
   Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                        on_safe, &op_event);
@@ -577,7 +577,7 @@ void Replay<I>::handle_event(const journal::SnapRemoveEvent &event,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": Snap remove event" << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   OpEvent *op_event;
   Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                        on_safe, &op_event);
@@ -601,7 +601,7 @@ void Replay<I>::handle_event(const journal::SnapRenameEvent &event,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": Snap rename event" << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   OpEvent *op_event;
   Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                        on_safe, &op_event);
@@ -625,7 +625,7 @@ void Replay<I>::handle_event(const journal::SnapProtectEvent &event,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": Snap protect event" << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   OpEvent *op_event;
   Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                        on_safe, &op_event);
@@ -649,7 +649,7 @@ void Replay<I>::handle_event(const journal::SnapUnprotectEvent &event,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": Snap unprotect event" << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   OpEvent *op_event;
   Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                        on_safe, &op_event);
@@ -677,7 +677,7 @@ void Replay<I>::handle_event(const journal::SnapRollbackEvent &event,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": Snap rollback start event" << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   OpEvent *op_event;
   Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                        on_safe, &op_event);
@@ -699,7 +699,7 @@ void Replay<I>::handle_event(const journal::RenameEvent &event,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": Rename event" << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   OpEvent *op_event;
   Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                        on_safe, &op_event);
@@ -723,7 +723,7 @@ void Replay<I>::handle_event(const journal::ResizeEvent &event,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": Resize start event" << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   OpEvent *op_event;
   Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                        on_safe, &op_event);
@@ -748,7 +748,7 @@ void Replay<I>::handle_event(const journal::FlattenEvent &event,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": Flatten start event" << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   OpEvent *op_event;
   Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                        on_safe, &op_event);
@@ -781,7 +781,7 @@ void Replay<I>::handle_event(const journal::SnapLimitEvent &event,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": Snap limit event" << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   OpEvent *op_event;
   Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                        on_safe, &op_event);
@@ -805,7 +805,7 @@ void Replay<I>::handle_event(const journal::UpdateFeaturesEvent &event,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": Update features event" << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   OpEvent *op_event;
   Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                        on_safe, &op_event);
@@ -830,7 +830,7 @@ void Replay<I>::handle_event(const journal::MetadataSetEvent &event,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": Metadata set event" << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   OpEvent *op_event;
   Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                        on_safe, &op_event);
@@ -852,7 +852,7 @@ void Replay<I>::handle_event(const journal::MetadataRemoveEvent &event,
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": Metadata remove event" << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   OpEvent *op_event;
   Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
                                                        on_safe, &op_event);
@@ -883,7 +883,7 @@ void Replay<I>::handle_event(const journal::UnknownEvent &event,
 template <typename I>
 void Replay<I>::handle_aio_modify_complete(Context *on_ready, Context *on_safe,
                                            int r, std::set<int> &filters) {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << ": on_ready=" << on_ready << ", "
                  << "on_safe=" << on_safe << ", r=" << r << dendl;
@@ -918,7 +918,7 @@ void Replay<I>::handle_aio_flush_complete(Context *on_flush_safe,
   Context *on_aio_ready = nullptr;
   Context *on_flush = nullptr;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ceph_assert(m_in_flight_aio_flush > 0);
     ceph_assert(m_in_flight_aio_modify >= on_safe_ctxs.size());
     --m_in_flight_aio_flush;
@@ -972,7 +972,7 @@ Context *Replay<I>::create_op_context_callback(uint64_t op_tid,
     return nullptr;
   }
 
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   if (m_op_events.count(op_tid) != 0) {
     lderr(cct) << ": duplicate op tid detected: " << op_tid << dendl;
 
@@ -1001,7 +1001,7 @@ void Replay<I>::handle_op_complete(uint64_t op_tid, int r) {
   OpEvent op_event;
   bool shutting_down = false;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     auto op_it = m_op_events.find(op_tid);
     ceph_assert(op_it != m_op_events.end());
 
@@ -1049,7 +1049,7 @@ void Replay<I>::handle_op_complete(uint64_t op_tid, int r) {
   // dropped -- handle if pending
   Context *on_flush = nullptr;
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ceph_assert(m_in_flight_op_events > 0);
     --m_in_flight_op_events;
     if (m_in_flight_op_events == 0 &&
@@ -1069,7 +1069,7 @@ Replay<I>::create_aio_modify_completion(Context *on_ready,
                                         io::aio_type_t aio_type,
                                         bool *flush_required,
                                         std::set<int> &&filters) {
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   CephContext *cct = m_image_ctx.cct;
   ceph_assert(m_on_aio_ready == nullptr);
 
@@ -1117,7 +1117,7 @@ Replay<I>::create_aio_modify_completion(Context *on_ready,
 
 template <typename I>
 io::AioCompletion *Replay<I>::create_aio_flush_completion(Context *on_safe) {
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   CephContext *cct = m_image_ctx.cct;
   if (m_shut_down) {
@@ -1143,9 +1143,9 @@ template <typename I>
 bool Replay<I>::clipped_io(uint64_t image_offset, io::AioCompletion *aio_comp) {
   CephContext *cct = m_image_ctx.cct;
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   size_t image_size = m_image_ctx.size;
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 
   if (image_offset >= image_size) {
     // rbd-mirror image sync might race an IO event w/ associated resize between
index 6e058ddb35b7d0db8eafc83d569218fc4cf685ab..038601833cd97120f7acb91bcc86167e88d7c3d4 100644 (file)
@@ -7,7 +7,7 @@
 #include "include/int_types.h"
 #include "include/buffer_fwd.h"
 #include "include/Context.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "librbd/io/Types.h"
 #include "librbd/journal/Types.h"
 #include <boost/variant.hpp>
@@ -119,7 +119,7 @@ private:
 
   ImageCtxT &m_image_ctx;
 
-  Mutex m_lock;
+  ceph::mutex m_lock = ceph::make_mutex("Replay<I>::m_lock");
 
   uint64_t m_in_flight_aio_flush = 0;
   uint64_t m_in_flight_aio_modify = 0;
index 9cc8e2e437c13c2245b3bddd4261a6bd72c771cd..44f5ac8a6bd7e07545b565eec8b56f10217f33aa 100644 (file)
@@ -8,7 +8,6 @@
 #include "include/buffer.h"
 #include "include/rados/librados.hpp"
 #include "include/rbd/librbd.hpp"
-#include "common/Mutex.h"
 #include "librbd/journal/TypeTraits.h"
 #include <string>
 
index 1721a9b2c9410b4fbdcc5f6cb6b4aee519ab72b7..231bcae2d3209e0cb4aaacae7bde806dbd3c72be 100644 (file)
@@ -32,7 +32,7 @@ int C_DecodeTag::process(int r) {
     return r;
   }
 
-  Mutex::Locker locker(*lock);
+  std::lock_guard locker{*lock};
   *tag_tid = tag.tid;
 
   auto data_it = tag.data.cbegin();
@@ -64,7 +64,7 @@ int C_DecodeTags::process(int r) {
     return -ENOENT;
   }
 
-  Mutex::Locker locker(*lock);
+  std::lock_guard locker{*lock};
   *tag_tid = tags.back().tid;
   auto data_it = tags.back().data.cbegin();
   r = C_DecodeTag::decode(&data_it, tag_data);
index 63d37c03726eebd8265749f5e18215ada354f029..d22044d10375892b6a2abd0f0cf0e93b45227d0a 100644 (file)
@@ -10,7 +10,6 @@
 #include <list>
 
 struct CephContext;
-struct Mutex;
 
 namespace librbd {
 namespace journal {
@@ -21,14 +20,14 @@ namespace util {
 
 struct C_DecodeTag : public Context {
   CephContext *cct;
-  Mutex *lock;
+  ceph::mutex *lock;
   uint64_t *tag_tid;
   TagData *tag_data;
   Context *on_finish;
 
   cls::journal::Tag tag;
 
-  C_DecodeTag(CephContext *cct, Mutex *lock, uint64_t *tag_tid,
+  C_DecodeTag(CephContext *cct, ceph::mutex *lock, uint64_t *tag_tid,
               TagData *tag_data, Context *on_finish)
     : cct(cct), lock(lock), tag_tid(tag_tid), tag_data(tag_data),
       on_finish(on_finish) {
@@ -51,14 +50,14 @@ struct C_DecodeTags : public Context {
   typedef std::list<cls::journal::Tag> Tags;
 
   CephContext *cct;
-  Mutex *lock;
+  ceph::mutex *lock;
   uint64_t *tag_tid;
   TagData *tag_data;
   Context *on_finish;
 
   Tags tags;
 
-  C_DecodeTags(CephContext *cct, Mutex *lock, uint64_t *tag_tid,
+  C_DecodeTags(CephContext *cct, ceph::mutex *lock, uint64_t *tag_tid,
                TagData *tag_data, Context *on_finish)
     : cct(cct), lock(lock), tag_tid(tag_tid), tag_data(tag_data),
       on_finish(on_finish) {
index 47f5cd5a7cc8f240f3a725b8b2a918b74deeda00..6c303c963fa49db1e06259d9dc2e047c4b25e5a0 100644 (file)
@@ -1430,7 +1430,7 @@ namespace librbd {
     tracepoint(librbd, get_access_timestamp_enter, ictx, ictx->name.c_str(),
                ictx->read_only);
     {
-      RWLock::RLocker timestamp_locker(ictx->timestamp_lock);
+      std::shared_lock timestamp_locker{ictx->timestamp_lock};
       utime_t time = ictx->get_access_timestamp();
       time.to_timespec(timestamp);
     }
@@ -1444,7 +1444,7 @@ namespace librbd {
     tracepoint(librbd, get_modify_timestamp_enter, ictx, ictx->name.c_str(),
                ictx->read_only);
     {
-      RWLock::RLocker timestamp_locker(ictx->timestamp_lock);
+      std::shared_lock timestamp_locker{ictx->timestamp_lock};
       utime_t time = ictx->get_modify_timestamp();
       time.to_timespec(timestamp);
     }
index c5d38752ea9200b4c1f73a0f7d96210eb7e69c38..0e194dbfb173fdc7dbc73dd8162615bf7c71125b 100644 (file)
@@ -66,9 +66,9 @@ template <typename I>
 void DemoteRequest<I>::acquire_lock() {
   CephContext *cct = m_image_ctx.cct;
 
-  m_image_ctx.owner_lock.get_read();
+  m_image_ctx.owner_lock.lock_shared();
   if (m_image_ctx.exclusive_lock == nullptr) {
-    m_image_ctx.owner_lock.put_read();
+    m_image_ctx.owner_lock.unlock_shared();
     lderr(cct) << "exclusive lock is not active" << dendl;
     finish(-EINVAL);
     return;
@@ -80,7 +80,7 @@ void DemoteRequest<I>::acquire_lock() {
   m_blocked_requests = true;
 
   if (m_image_ctx.exclusive_lock->is_lock_owner()) {
-    m_image_ctx.owner_lock.put_read();
+    m_image_ctx.owner_lock.unlock_shared();
     demote();
     return;
   }
@@ -90,7 +90,7 @@ void DemoteRequest<I>::acquire_lock() {
   auto ctx = create_context_callback<
     DemoteRequest<I>, &DemoteRequest<I>::handle_acquire_lock>(this);
   m_image_ctx.exclusive_lock->acquire_lock(ctx);
-  m_image_ctx.owner_lock.put_read();
+  m_image_ctx.owner_lock.unlock_shared();
 }
 
 template <typename I>
@@ -104,16 +104,16 @@ void DemoteRequest<I>::handle_acquire_lock(int r) {
     return;
   }
 
-  m_image_ctx.owner_lock.get_read();
+  m_image_ctx.owner_lock.lock_shared();
   if (m_image_ctx.exclusive_lock != nullptr &&
       !m_image_ctx.exclusive_lock->is_lock_owner()) {
     r = m_image_ctx.exclusive_lock->get_unlocked_op_error();
-    m_image_ctx.owner_lock.put_read();
+    m_image_ctx.owner_lock.unlock_shared();
     lderr(cct) << "failed to acquire exclusive lock" << dendl;
     finish(r);
     return;
   }
-  m_image_ctx.owner_lock.put_read();
+  m_image_ctx.owner_lock.unlock_shared();
 
   demote();
 }
@@ -146,9 +146,9 @@ void DemoteRequest<I>::release_lock() {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 20) << dendl;
 
-  m_image_ctx.owner_lock.get_read();
+  m_image_ctx.owner_lock.lock_shared();
   if (m_image_ctx.exclusive_lock == nullptr) {
-    m_image_ctx.owner_lock.put_read();
+    m_image_ctx.owner_lock.unlock_shared();
     finish(0);
     return;
   }
@@ -156,7 +156,7 @@ void DemoteRequest<I>::release_lock() {
   auto ctx = create_context_callback<
     DemoteRequest<I>, &DemoteRequest<I>::handle_release_lock>(this);
   m_image_ctx.exclusive_lock->release_lock(ctx);
-  m_image_ctx.owner_lock.put_read();
+  m_image_ctx.owner_lock.unlock_shared();
 }
 
 template <typename I>
@@ -179,7 +179,7 @@ void DemoteRequest<I>::finish(int r) {
   }
 
   {
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+    std::shared_lock owner_locker{m_image_ctx.owner_lock};
     if (m_blocked_requests && m_image_ctx.exclusive_lock != nullptr) {
       m_image_ctx.exclusive_lock->unblock_requests();
     }
index 6a21c5608d7a470087431f139352c3b4292a6a66..ad9a525c9bd2ac7270b60b9aa018c9afb328df1a 100644 (file)
@@ -28,7 +28,7 @@ template <typename I>
 DisableRequest<I>::DisableRequest(I *image_ctx, bool force, bool remove,
                                   Context *on_finish)
   : m_image_ctx(image_ctx), m_force(force), m_remove(remove),
-    m_on_finish(on_finish), m_lock("mirror::DisableRequest::m_lock") {
+    m_on_finish(on_finish) {
 }
 
 template <typename I>
@@ -239,7 +239,7 @@ Context *DisableRequest<I>::handle_get_clients(int *result) {
     return m_on_finish;
   }
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
 
   ceph_assert(m_current_ops.empty());
 
@@ -306,7 +306,7 @@ void DisableRequest<I>::send_remove_snap(const std::string &client_id,
   ldout(cct, 10) << this << " " << __func__ << ": client_id=" << client_id
                  << ", snap_name=" << snap_name << dendl;
 
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
 
   m_current_ops[client_id]++;
 
@@ -328,7 +328,7 @@ Context *DisableRequest<I>::handle_remove_snap(int *result,
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
 
   ceph_assert(m_current_ops[client_id] > 0);
   m_current_ops[client_id]--;
@@ -353,7 +353,7 @@ void DisableRequest<I>::send_unregister_client(
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << this << " " << __func__ << dendl;
 
-  ceph_assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
   ceph_assert(m_current_ops[client_id] == 0);
 
   Context *ctx = create_context_callback(
@@ -381,7 +381,7 @@ Context *DisableRequest<I>::handle_unregister_client(
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(m_current_ops[client_id] == 0);
   m_current_ops.erase(client_id);
 
index 1a3b122339c8c8e7a7693c0d948a8dd127cbf0a9..a3eeee786764d53ff9dde51e06c15fd22b98803d 100644 (file)
@@ -5,7 +5,7 @@
 #define CEPH_LIBRBD_MIRROR_DISABLE_REQUEST_H
 
 #include "include/buffer.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "cls/journal/cls_journal_types.h"
 #include "cls/rbd/cls_rbd_types.h"
 #include <map>
@@ -92,7 +92,8 @@ private:
   std::map<std::string, int> m_ret;
   std::map<std::string, int> m_current_ops;
   int m_error_result = 0;
-  mutable Mutex m_lock;
+  mutable ceph::mutex m_lock =
+    ceph::make_mutex("mirror::DisableRequest::m_lock");
 
   void send_get_mirror_image();
   Context *handle_get_mirror_image(int *result);
index 6576fb2c2e4c303a6d34fca13559eabc8daef369..d26f929fa7bef673d27361b183a577c625fdafa0 100644 (file)
@@ -33,7 +33,7 @@ void CreateRequest<I>::send() {
   uint64_t max_size = m_image_ctx->size;
 
   {
-    RWLock::WLocker image_locker(m_image_ctx->image_lock);
+    std::unique_lock image_locker{m_image_ctx->image_lock};
     m_snap_ids.push_back(CEPH_NOSNAP);
     for (auto it : m_image_ctx->snap_info) {
       max_size = std::max(max_size, it.second.size);
index 6929abe7aad06ac21220e0db05d7884abc6718d7..33984cda1b1efc71a51c135e886f3c6633fae0f9 100644 (file)
@@ -5,7 +5,6 @@
 #define CEPH_LIBRBD_OBJECT_MAP_CREATE_REQUEST_H
 
 #include "include/buffer.h"
-#include "common/Mutex.h"
 #include <map>
 #include <string>
 
index c610e2b0f0684c5df908f018da630f94039c3320..bf2db96609817b52728830ccae2e5758ce820545 100644 (file)
@@ -23,8 +23,8 @@ InvalidateRequest<I>* InvalidateRequest<I>::create(I &image_ctx,
 template <typename I>
 void InvalidateRequest<I>::send() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
-  ceph_assert(image_ctx.image_lock.is_wlocked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
+  ceph_assert(ceph_mutex_is_wlocked(image_ctx.image_lock));
 
   uint64_t snap_flags;
   int r = image_ctx.get_flags(m_snap_id, &snap_flags);
index e061312e93845c1c84fd272af2fbbf3cfb820824..1af868d287343feadc7013f373a7d622b526f476 100644 (file)
@@ -25,7 +25,7 @@ using util::create_rados_callback;
 namespace object_map {
 
 template <typename I>
-RefreshRequest<I>::RefreshRequest(I &image_ctx, RWLock* object_map_lock,
+RefreshRequest<I>::RefreshRequest(I &image_ctx, ceph::shared_mutex* object_map_lock,
                                   ceph::BitVector<2> *object_map,
                                   uint64_t snap_id, Context *on_finish)
   : m_image_ctx(image_ctx), m_object_map_lock(object_map_lock),
@@ -36,7 +36,7 @@ RefreshRequest<I>::RefreshRequest(I &image_ctx, RWLock* object_map_lock,
 template <typename I>
 void RefreshRequest<I>::send() {
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     m_object_count = Striper::get_num_objects(
       m_image_ctx.layout, m_image_ctx.get_image_size(m_snap_id));
   }
@@ -52,13 +52,13 @@ template <typename I>
 void RefreshRequest<I>::apply() {
   uint64_t num_objs;
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
     num_objs = Striper::get_num_objects(
       m_image_ctx.layout, m_image_ctx.get_image_size(m_snap_id));
   }
   ceph_assert(m_on_disk_object_map.size() >= num_objs);
 
-  RWLock::WLocker object_map_locker(*m_object_map_lock);
+  std::unique_lock object_map_locker{*m_object_map_lock};
   *m_object_map = m_on_disk_object_map;
 }
 
@@ -173,8 +173,8 @@ void RefreshRequest<I>::send_invalidate() {
   InvalidateRequest<I> *req = InvalidateRequest<I>::create(
     m_image_ctx, m_snap_id, true, ctx);
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-  RWLock::WLocker image_locker(m_image_ctx.image_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
+  std::unique_lock image_locker{m_image_ctx.image_lock};
   req->send();
 }
 
@@ -207,8 +207,8 @@ void RefreshRequest<I>::send_resize_invalidate() {
   InvalidateRequest<I> *req = InvalidateRequest<I>::create(
     m_image_ctx, m_snap_id, true, ctx);
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-  RWLock::WLocker image_locker(m_image_ctx.image_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
+  std::unique_lock image_locker{m_image_ctx.image_lock};
   req->send();
 }
 
@@ -278,8 +278,8 @@ void RefreshRequest<I>::send_invalidate_and_close() {
     m_image_ctx, m_snap_id, false, ctx);
 
   lderr(cct) << "object map too large: " << m_object_count << dendl;
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-  RWLock::WLocker image_locker(m_image_ctx.image_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
+  std::unique_lock image_locker{m_image_ctx.image_lock};
   req->send();
 }
 
@@ -295,7 +295,7 @@ Context *RefreshRequest<I>::handle_invalidate_and_close(int *ret_val) {
     *ret_val = -EFBIG;
   }
 
-  RWLock::WLocker object_map_locker(*m_object_map_lock);
+  std::unique_lock object_map_locker{*m_object_map_lock};
   m_object_map->clear();
   return m_on_finish;
 }
index 3e83dd3eee943218e71d786e6ea12298b0f403e0..0bca85079d3cc05351ef4a0f1e3f3cabcdc75d82 100644 (file)
@@ -7,6 +7,7 @@
 #include "include/int_types.h"
 #include "include/buffer.h"
 #include "common/bit_vector.hpp"
+#include "common/ceph_mutex.h"
 
 class Context;
 class RWLock;
@@ -20,14 +21,15 @@ namespace object_map {
 template <typename ImageCtxT = ImageCtx>
 class RefreshRequest {
 public:
-  static RefreshRequest *create(ImageCtxT &image_ctx, RWLock* object_map_lock,
+  static RefreshRequest *create(ImageCtxT &image_ctx,
+                               ceph::shared_mutex* object_map_lock,
                                 ceph::BitVector<2> *object_map,
                                 uint64_t snap_id, Context *on_finish) {
     return new RefreshRequest(image_ctx, object_map_lock, object_map, snap_id,
                               on_finish);
   }
 
-  RefreshRequest(ImageCtxT &image_ctx, RWLock* object_map_lock,
+  RefreshRequest(ImageCtxT &image_ctx, ceph::shared_mutex* object_map_lock,
                  ceph::BitVector<2> *object_map, uint64_t snap_id,
                  Context *on_finish);
 
@@ -61,7 +63,7 @@ private:
    */
 
   ImageCtxT &m_image_ctx;
-  RWLock* m_object_map_lock;
+  ceph::shared_mutex* m_object_map_lock;
   ceph::BitVector<2> *m_object_map;
   uint64_t m_snap_id;
   Context *m_on_finish;
index bfd80f51d8b2bea0bdbbfe5b98ed154dc4a70bfd..a718d81fc50106f2428d2ceb0cfd763dd9f8783b 100644 (file)
@@ -21,8 +21,7 @@ using util::create_rados_callback;
 
 template <typename I>
 RemoveRequest<I>::RemoveRequest(I *image_ctx, Context *on_finish)
-  : m_image_ctx(image_ctx), m_on_finish(on_finish),
-    m_lock("object_map::RemoveRequest::m_lock") {
+  : m_image_ctx(image_ctx), m_on_finish(on_finish) {
 }
 
 template <typename I>
@@ -35,14 +34,14 @@ void RemoveRequest<I>::send_remove_object_map() {
   CephContext *cct = m_image_ctx->cct;
   ldout(cct, 20) << __func__ << dendl;
 
-  RWLock::WLocker image_locker(m_image_ctx->image_lock);
+  std::unique_lock image_locker{m_image_ctx->image_lock};
   std::vector<uint64_t> snap_ids;
   snap_ids.push_back(CEPH_NOSNAP);
   for (auto it : m_image_ctx->snap_info) {
     snap_ids.push_back(it.first);
   }
 
-  Mutex::Locker locker(m_lock);
+  std::lock_guard locker{m_lock};
   ceph_assert(m_ref_counter == 0);
 
   for (auto snap_id : snap_ids) {
@@ -64,7 +63,7 @@ Context *RemoveRequest<I>::handle_remove_object_map(int *result) {
   ldout(cct, 20) << __func__ << ": r=" << *result << dendl;
 
   {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     ceph_assert(m_ref_counter > 0);
     m_ref_counter--;
 
index 1353ef9b70422d857aa340a8c32ddeacd009f4fc..ce82e603c9ca092a5c54fc99d32c9311c9ad1774 100644 (file)
@@ -5,7 +5,7 @@
 #define CEPH_LIBRBD_OBJECT_MAP_REMOVE_REQUEST_H
 
 #include "include/buffer.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include <map>
 #include <string>
 
@@ -48,7 +48,8 @@ private:
 
   int m_error_result = 0;
   int m_ref_counter = 0;
-  mutable Mutex m_lock;
+  mutable ceph::mutex m_lock =
+    ceph::make_mutex("object_map::RemoveRequest::m_lock");
 
   void send_remove_object_map();
   Context *handle_remove_object_map(int *result);
index 8690272292309d345b36196a070b73a7a9b1515c..e323251878afb2c45acf20d6e304e49eacf79cef 100644 (file)
@@ -57,8 +57,8 @@ bool Request::invalidate() {
 
   m_state = STATE_INVALIDATE;
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-  RWLock::WLocker image_locker(m_image_ctx.image_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
+  std::unique_lock image_locker{m_image_ctx.image_lock};
   InvalidateRequest<> *req = new InvalidateRequest<>(m_image_ctx, m_snap_id,
                                                      true,
                                                      create_callback_context());
index 8f0f1da51c09d2b18659f568c7d67df2cba0cffc..89e39b670d8a5f8301df7c3ac2144d61e39b19bb 100644 (file)
@@ -32,7 +32,7 @@ void ResizeRequest::resize(ceph::BitVector<2> *object_map, uint64_t num_objs,
 void ResizeRequest::send() {
   CephContext *cct = m_image_ctx.cct;
 
-  RWLock::WLocker l(*m_object_map_lock);
+  std::unique_lock l{*m_object_map_lock};
   m_num_objs = Striper::get_num_objects(m_image_ctx.layout, m_new_size);
 
   std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
@@ -57,7 +57,7 @@ void ResizeRequest::finish_request() {
   ldout(cct, 5) << this << " resizing in-memory object map: "
                << m_num_objs << dendl;
 
-  RWLock::WLocker object_map_locker(*m_object_map_lock);
+  std::unique_lock object_map_locker{*m_object_map_lock};
   resize(m_object_map, m_num_objs, m_default_object_state);
 }
 
index eda8f2f097eb020b1012cd269f329a0ed77ddf93..dccdef133cee0b067574ed44f79052b64aed5747 100644 (file)
@@ -19,7 +19,7 @@ namespace object_map {
 
 class ResizeRequest : public Request {
 public:
-  ResizeRequest(ImageCtx &image_ctx, RWLock *object_map_lock,
+  ResizeRequest(ImageCtx &image_ctx, ceph::shared_mutex *object_map_lock,
                 ceph::BitVector<2> *object_map, uint64_t snap_id,
                 uint64_t new_size, uint8_t default_object_state,
                 Context *on_finish)
@@ -39,7 +39,7 @@ protected:
   void finish_request() override;
 
 private:
-  RWLock* m_object_map_lock;
+  ceph::shared_mutex* m_object_map_lock;
   ceph::BitVector<2> *m_object_map;
   uint64_t m_num_objs;
   uint64_t m_new_size;
index 2421adf9cbe029d57cf0d2cc7a96be174fab2c2f..729af867fd20af600100dc3d5ba850ce6267535d 100644 (file)
@@ -54,7 +54,7 @@ bool SnapshotCreateRequest::should_complete(int r) {
     return Request::should_complete(r);
   }
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
   bool finished = false;
   switch (m_state) {
   case STATE_READ_MAP:
@@ -75,7 +75,7 @@ bool SnapshotCreateRequest::should_complete(int r) {
 }
 
 void SnapshotCreateRequest::send_read_map() {
-  ceph_assert(m_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
 
   CephContext *cct = m_image_ctx.cct;
   std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
@@ -110,7 +110,7 @@ void SnapshotCreateRequest::send_write_map() {
 }
 
 bool SnapshotCreateRequest::send_add_snapshot() {
-  RWLock::RLocker image_locker(m_image_ctx.image_lock);
+  std::shared_lock image_locker{m_image_ctx.image_lock};
   if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) == 0) {
     return true;
   }
@@ -132,7 +132,7 @@ bool SnapshotCreateRequest::send_add_snapshot() {
 }
 
 void SnapshotCreateRequest::update_object_map() {
-  RWLock::WLocker object_map_locker(*m_object_map_lock);
+  std::unique_lock object_map_locker{*m_object_map_lock};
 
   auto it = m_object_map.begin();
   auto end_it = m_object_map.end();
index 757833acf068f2c09fc4942d609ae76c0f899be1..3074d059d577dfc7f7d53982ffad31a9258f410e 100644 (file)
@@ -45,7 +45,7 @@ public:
     STATE_ADD_SNAPSHOT
   };
 
-  SnapshotCreateRequest(ImageCtx &image_ctx, RWLock* object_map_lock,
+  SnapshotCreateRequest(ImageCtx &image_ctx, ceph::shared_mutex* object_map_lock,
                         ceph::BitVector<2> *object_map, uint64_t snap_id,
                         Context *on_finish)
     : Request(image_ctx, snap_id, on_finish),
@@ -59,7 +59,7 @@ protected:
   bool should_complete(int r) override;
 
 private:
-  RWLock* m_object_map_lock;
+  ceph::shared_mutex* m_object_map_lock;
   ceph::BitVector<2> &m_object_map;
 
   State m_state = STATE_READ_MAP;
index 42d3ca7ab683e77c92ff5f5dc5393354611b58c9..a29d78f74c72484513c98e494b23b29857c0c75b 100644 (file)
@@ -18,8 +18,8 @@ namespace librbd {
 namespace object_map {
 
 void SnapshotRemoveRequest::send() {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
-  ceph_assert(m_image_ctx.image_lock.is_wlocked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+  ceph_assert(ceph_mutex_is_wlocked(m_image_ctx.image_lock));
 
   if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) {
     int r = m_image_ctx.get_flags(m_snap_id, &m_flags);
@@ -66,8 +66,8 @@ void SnapshotRemoveRequest::handle_load_map(int r) {
     lderr(cct) << "failed to load object map " << oid << ": "
                << cpp_strerror(r) << dendl;
 
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-    RWLock::WLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock owner_locker{m_image_ctx.owner_lock};
+    std::unique_lock image_locker{m_image_ctx.image_lock};
     invalidate_next_map();
     return;
   }
@@ -79,8 +79,8 @@ void SnapshotRemoveRequest::remove_snapshot() {
   if ((m_flags & RBD_FLAG_OBJECT_MAP_INVALID) != 0) {
     // snapshot object map exists on disk but is invalid. cannot clean fast-diff
     // on next snapshot if current snapshot was invalid.
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-    RWLock::WLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock owner_locker{m_image_ctx.owner_lock};
+    std::unique_lock image_locker{m_image_ctx.image_lock};
     invalidate_next_map();
     return;
   }
@@ -112,20 +112,20 @@ void SnapshotRemoveRequest::handle_remove_snapshot(int r) {
     lderr(cct) << "failed to remove object map snapshot " << oid << ": "
                << cpp_strerror(r) << dendl;
 
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-    RWLock::WLocker image_locker(m_image_ctx.image_lock);
+    std::shared_lock owner_locker{m_image_ctx.owner_lock};
+    std::unique_lock image_locker{m_image_ctx.image_lock};
     invalidate_next_map();
     return;
   }
 
-  RWLock::RLocker image_locker(m_image_ctx.image_lock);
+  std::shared_lock image_locker{m_image_ctx.image_lock};
   update_object_map();
   remove_map();
 }
 
 void SnapshotRemoveRequest::invalidate_next_map() {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
-  ceph_assert(m_image_ctx.image_lock.is_wlocked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+  ceph_assert(ceph_mutex_is_wlocked(m_image_ctx.image_lock));
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << dendl;
@@ -185,7 +185,7 @@ void SnapshotRemoveRequest::handle_remove_map(int r) {
 }
 
 void SnapshotRemoveRequest::compute_next_snap_id() {
-  ceph_assert(m_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
 
   m_next_snap_id = CEPH_NOSNAP;
   std::map<librados::snap_t, SnapInfo>::const_iterator it =
@@ -199,8 +199,8 @@ void SnapshotRemoveRequest::compute_next_snap_id() {
 }
 
 void SnapshotRemoveRequest::update_object_map() {
-  assert(m_image_ctx.image_lock.is_locked());
-  RWLock::WLocker object_map_locker(*m_object_map_lock);
+  assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
+  std::unique_lock object_map_locker{*m_object_map_lock};
   if (m_next_snap_id == m_image_ctx.snap_id && m_next_snap_id == CEPH_NOSNAP) {
     CephContext *cct = m_image_ctx.cct;
     ldout(cct, 5) << dendl;
index 2327fea841810f4fb637cd462dc87c3f59534618..1e9c75d81d16b79ee42aabdcd9058904053f4489 100644 (file)
@@ -9,8 +9,6 @@
 #include "common/bit_vector.hpp"
 #include "librbd/AsyncRequest.h"
 
-class RWLock;
-
 namespace librbd {
 namespace object_map {
 
@@ -42,7 +40,7 @@ public:
    * otherwise, the state machine proceeds to remove the object map.
    */
 
-  SnapshotRemoveRequest(ImageCtx &image_ctx, RWLock* object_map_lock,
+  SnapshotRemoveRequest(ImageCtx &image_ctx, ceph::shared_mutex* object_map_lock,
                         ceph::BitVector<2> *object_map, uint64_t snap_id,
                         Context *on_finish)
     : AsyncRequest(image_ctx, on_finish),
@@ -58,7 +56,7 @@ protected:
   }
 
 private:
-  RWLock* m_object_map_lock;
+  ceph::shared_mutex* m_object_map_lock;
   ceph::BitVector<2> &m_object_map;
   uint64_t m_snap_id;
   uint64_t m_next_snap_id;
index 3e5d54dc3d4495db5a538d60557bc22162d1ee9d..476a33b2e1acc9a527da4bf560bff4b554b6822f 100644 (file)
@@ -94,7 +94,7 @@ void SnapshotRollbackRequest::send_read_map() {
 }
 
 void SnapshotRollbackRequest::send_write_map() {
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
 
   CephContext *cct = m_image_ctx.cct;
   std::string snap_oid(ObjectMap<>::object_map_name(m_image_ctx.id,
@@ -114,8 +114,8 @@ void SnapshotRollbackRequest::send_write_map() {
 }
 
 void SnapshotRollbackRequest::send_invalidate_map() {
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-  RWLock::WLocker image_locker(m_image_ctx.image_lock);
+  std::shared_lock owner_locker{m_image_ctx.owner_lock};
+  std::unique_lock image_locker{m_image_ctx.image_lock};
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << dendl;
index 0275034135c1bdca82195acbaa602af6d92bcc38..53a604bcf248e46aaacf458d41824ea41900b223 100644 (file)
@@ -33,8 +33,8 @@ void UpdateRequest<I>::send() {
 
 template <typename I>
 void UpdateRequest<I>::update_object_map() {
-  ceph_assert(m_image_ctx.image_lock.is_locked());
-  ceph_assert(m_object_map_lock->is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
+  ceph_assert(ceph_mutex_is_locked(*m_object_map_lock));
   CephContext *cct = m_image_ctx.cct;
 
   // break very large requests into manageable batches
@@ -80,8 +80,8 @@ void UpdateRequest<I>::handle_update_object_map(int r) {
   }
 
   {
-    RWLock::RLocker image_locker(m_image_ctx.image_lock);
-    RWLock::WLocker object_map_locker(*m_object_map_lock);
+    std::shared_lock image_locker{m_image_ctx.image_lock};
+    std::unique_lock object_map_locker{*m_object_map_lock};
     update_in_memory_object_map();
 
     if (m_update_end_object_no < m_end_object_no) {
@@ -97,8 +97,8 @@ void UpdateRequest<I>::handle_update_object_map(int r) {
 
 template <typename I>
 void UpdateRequest<I>::update_in_memory_object_map() {
-  ceph_assert(m_image_ctx.image_lock.is_locked());
-  ceph_assert(m_object_map_lock->is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
+  ceph_assert(ceph_mutex_is_locked(*m_object_map_lock));
 
   // rebuilding the object map might update on-disk only
   if (m_snap_id == m_image_ctx.snap_id) {
index ffaa883da547fa6225a8560cf42f4645101fd860..b5a72d591f3e14b62afcbecfbd14ae427763b0a1 100644 (file)
@@ -12,7 +12,6 @@
 #include <boost/optional.hpp>
 
 class Context;
-class RWLock;
 
 namespace librbd {
 
@@ -24,7 +23,7 @@ template <typename ImageCtxT = librbd::ImageCtx>
 class UpdateRequest : public Request {
 public:
   static UpdateRequest *create(ImageCtx &image_ctx,
-                               RWLock* object_map_lock,
+                               ceph::shared_mutex* object_map_lock,
                                ceph::BitVector<2> *object_map,
                                uint64_t snap_id, uint64_t start_object_no,
                                uint64_t end_object_no, uint8_t new_state,
@@ -37,7 +36,7 @@ public:
                              on_finish);
   }
 
-  UpdateRequest(ImageCtx &image_ctx, RWLock* object_map_lock,
+  UpdateRequest(ImageCtx &image_ctx, ceph::shared_mutex* object_map_lock,
                 ceph::BitVector<2> *object_map, uint64_t snap_id,
                 uint64_t start_object_no, uint64_t end_object_no,
                 uint8_t new_state,
@@ -79,7 +78,7 @@ private:
    * @endverbatim
    */
 
-  RWLock* m_object_map_lock;
+  ceph::shared_mutex* m_object_map_lock;
   ceph::BitVector<2> &m_object_map;
   uint64_t m_start_object_no;
   uint64_t m_end_object_no;
index dc58a989ab952c5c62be396960ca35e190177cff..3dea085aff1abd934f40b92cf0f644897b847519 100644 (file)
@@ -41,7 +41,7 @@ template <typename I>
 void DisableFeaturesRequest<I>::send_op() {
   I &image_ctx = this->m_image_ctx;
   CephContext *cct = image_ctx.cct;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   ldout(cct, 20) << this << " " << __func__ << ": features=" << m_features
                 << dendl;
@@ -94,7 +94,7 @@ void DisableFeaturesRequest<I>::send_block_writes() {
   CephContext *cct = image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << dendl;
 
-  RWLock::WLocker locker(image_ctx.owner_lock);
+  std::unique_lock locker{image_ctx.owner_lock};
   image_ctx.io_work_queue->block_writes(create_context_callback<
     DisableFeaturesRequest<I>,
     &DisableFeaturesRequest<I>::handle_block_writes>(this));
@@ -113,7 +113,7 @@ Context *DisableFeaturesRequest<I>::handle_block_writes(int *result) {
   m_writes_blocked = true;
 
   {
-    RWLock::WLocker locker(image_ctx.owner_lock);
+    std::unique_lock locker{image_ctx.owner_lock};
     // avoid accepting new requests from peers while we manipulate
     // the image features
     if (image_ctx.exclusive_lock != nullptr &&
@@ -139,7 +139,7 @@ void DisableFeaturesRequest<I>::send_acquire_exclusive_lock() {
     &DisableFeaturesRequest<I>::handle_acquire_exclusive_lock>(this);
 
   {
-    RWLock::WLocker locker(image_ctx.owner_lock);
+    std::unique_lock locker{image_ctx.owner_lock};
     // if disabling features w/ exclusive lock supported, we need to
     // acquire the lock to temporarily block IO against the image
     if (image_ctx.exclusive_lock != nullptr &&
@@ -160,16 +160,16 @@ Context *DisableFeaturesRequest<I>::handle_acquire_exclusive_lock(int *result) {
   CephContext *cct = image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
 
-  image_ctx.owner_lock.get_read();
+  image_ctx.owner_lock.lock_shared();
   if (*result < 0) {
     lderr(cct) << "failed to lock image: " << cpp_strerror(*result) << dendl;
-    image_ctx.owner_lock.put_read();
+    image_ctx.owner_lock.unlock_shared();
     return handle_finish(*result);
   } else if (image_ctx.exclusive_lock != nullptr &&
              !image_ctx.exclusive_lock->is_lock_owner()) {
     lderr(cct) << "failed to acquire exclusive lock" << dendl;
     *result = image_ctx.exclusive_lock->get_unlocked_op_error();
-    image_ctx.owner_lock.put_read();
+    image_ctx.owner_lock.unlock_shared();
     return handle_finish(*result);
   }
 
@@ -205,7 +205,7 @@ Context *DisableFeaturesRequest<I>::handle_acquire_exclusive_lock(int *result) {
       m_disable_flags |= RBD_FLAG_OBJECT_MAP_INVALID;
     }
   } while (false);
-  image_ctx.owner_lock.put_read();
+  image_ctx.owner_lock.unlock_shared();
 
   if (*result < 0) {
     return handle_finish(*result);
@@ -356,7 +356,7 @@ void DisableFeaturesRequest<I>::send_close_journal() {
   CephContext *cct = image_ctx.cct;
 
   {
-    RWLock::WLocker locker(image_ctx.owner_lock);
+    std::unique_lock locker{image_ctx.owner_lock};
     if (image_ctx.journal != nullptr) {
       ldout(cct, 20) << this << " " << __func__ << dendl;
 
@@ -628,7 +628,7 @@ Context *DisableFeaturesRequest<I>::handle_finish(int r) {
   ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
 
   {
-    RWLock::WLocker locker(image_ctx.owner_lock);
+    std::unique_lock locker{image_ctx.owner_lock};
     if (image_ctx.exclusive_lock != nullptr && m_requests_blocked) {
       image_ctx.exclusive_lock->unblock_requests();
     }
index e2c1113d0e9fd7396f2aa169b2a6e26b006e592e..c01cca7ecdc03b3d59dc9455ae26147338cf6d2d 100644 (file)
@@ -38,7 +38,7 @@ template <typename I>
 void EnableFeaturesRequest<I>::send_op() {
   I &image_ctx = this->m_image_ctx;
   CephContext *cct = image_ctx.cct;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   ldout(cct, 20) << this << " " << __func__ << ": features=" << m_features
                 << dendl;
@@ -90,7 +90,7 @@ void EnableFeaturesRequest<I>::send_block_writes() {
   CephContext *cct = image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << dendl;
 
-  RWLock::WLocker locker(image_ctx.owner_lock);
+  std::unique_lock locker{image_ctx.owner_lock};
   image_ctx.io_work_queue->block_writes(create_context_callback<
     EnableFeaturesRequest<I>,
     &EnableFeaturesRequest<I>::handle_block_writes>(this));
@@ -163,7 +163,7 @@ Context *EnableFeaturesRequest<I>::handle_get_mirror_mode(int *result) {
 
   bool create_journal = false;
   do {
-    RWLock::WLocker locker(image_ctx.owner_lock);
+    std::unique_lock locker{image_ctx.owner_lock};
 
     // avoid accepting new requests from peers while we manipulate
     // the image features
@@ -469,7 +469,7 @@ Context *EnableFeaturesRequest<I>::handle_finish(int r) {
   ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
 
   {
-    RWLock::WLocker locker(image_ctx.owner_lock);
+    std::unique_lock locker{image_ctx.owner_lock};
 
     if (image_ctx.exclusive_lock != nullptr && m_requests_blocked) {
       image_ctx.exclusive_lock->unblock_requests();
index 59dc8b558cf08f5bb2ca219673aee9c137595bb8..d94e8421083f45c97a685906ee26730c9de78fbf 100644 (file)
@@ -36,7 +36,7 @@ public:
 
   int send() override {
     I &image_ctx = this->m_image_ctx;
-    ceph_assert(image_ctx.owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
     CephContext *cct = image_ctx.cct;
 
     if (image_ctx.exclusive_lock != nullptr &&
@@ -46,7 +46,7 @@ public:
     }
 
     {
-      RWLock::RLocker image_lock(image_ctx.image_lock);
+      std::shared_lock image_lock{image_ctx.image_lock};
       if (image_ctx.object_map != nullptr &&
           !image_ctx.object_map->object_may_not_exist(m_object_no)) {
         // can skip because the object already exists
@@ -93,12 +93,12 @@ void FlattenRequest<I>::send_op() {
 template <typename I>
 void FlattenRequest<I>::flatten_objects() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << dendl;
 
-  assert(image_ctx.owner_lock.is_locked());
+  assert(ceph_mutex_is_locked(image_ctx.owner_lock));
   auto ctx = create_context_callback<
     FlattenRequest<I>,
     &FlattenRequest<I>::handle_flatten_objects>(this);
@@ -136,22 +136,22 @@ void FlattenRequest<I>::detach_child() {
   CephContext *cct = image_ctx.cct;
 
   // should have been canceled prior to releasing lock
-  image_ctx.owner_lock.get_read();
+  image_ctx.owner_lock.lock_shared();
   ceph_assert(image_ctx.exclusive_lock == nullptr ||
               image_ctx.exclusive_lock->is_lock_owner());
 
   // if there are no snaps, remove from the children object as well
   // (if snapshots remain, they have their own parent info, and the child
   // will be removed when the last snap goes away)
-  image_ctx.image_lock.get_read();
+  image_ctx.image_lock.lock_shared();
   if ((image_ctx.features & RBD_FEATURE_DEEP_FLATTEN) == 0 &&
       !image_ctx.snaps.empty()) {
-    image_ctx.image_lock.put_read();
-    image_ctx.owner_lock.put_read();
+    image_ctx.image_lock.unlock_shared();
+    image_ctx.owner_lock.unlock_shared();
     detach_parent();
     return;
   }
-  image_ctx.image_lock.put_read();
+  image_ctx.image_lock.unlock_shared();
 
   ldout(cct, 5) << dendl;
   auto ctx = create_context_callback<
@@ -159,7 +159,7 @@ void FlattenRequest<I>::detach_child() {
     &FlattenRequest<I>::handle_detach_child>(this);
   auto req = image::DetachChildRequest<I>::create(image_ctx, ctx);
   req->send();
-  image_ctx.owner_lock.put_read();
+  image_ctx.owner_lock.unlock_shared();
 }
 
 template <typename I>
@@ -184,21 +184,21 @@ void FlattenRequest<I>::detach_parent() {
   ldout(cct, 5) << dendl;
 
   // should have been canceled prior to releasing lock
-  image_ctx.owner_lock.get_read();
+  image_ctx.owner_lock.lock_shared();
   ceph_assert(image_ctx.exclusive_lock == nullptr ||
               image_ctx.exclusive_lock->is_lock_owner());
 
   // stop early if the parent went away - it just means
   // another flatten finished first, so this one is useless.
-  image_ctx.image_lock.get_read();
+  image_ctx.image_lock.lock_shared();
   if (!image_ctx.parent) {
     ldout(cct, 5) << "image already flattened" << dendl;
-    image_ctx.image_lock.put_read();
-    image_ctx.owner_lock.put_read();
+    image_ctx.image_lock.unlock_shared();
+    image_ctx.owner_lock.unlock_shared();
     this->complete(0);
     return;
   }
-  image_ctx.image_lock.put_read();
+  image_ctx.image_lock.unlock_shared();
 
   // remove parent from this (base) image
   auto ctx = create_context_callback<
@@ -206,7 +206,7 @@ void FlattenRequest<I>::detach_parent() {
     &FlattenRequest<I>::handle_detach_parent>(this);
   auto req = image::DetachParentRequest<I>::create(image_ctx, ctx);
   req->send();
-  image_ctx.owner_lock.put_read();
+  image_ctx.owner_lock.unlock_shared();
 }
 
 template <typename I>
index 828e7a5b6fd54f348c2e124d58617e13b8601a2a..c5d6141adb881c79e32b48dfc73a20106093d62f 100644 (file)
@@ -40,7 +40,7 @@ bool MetadataRemoveRequest<I>::should_complete(int r) {
 template <typename I>
 void MetadataRemoveRequest<I>::send_metadata_remove() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   CephContext *cct = image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << dendl;
index 760e9b1e32e4a1831b5d972e4ce8d5bdd305015f..5fb939352e2fe2e8086bfb4264b7d9ba2c1fa8d5 100644 (file)
@@ -41,7 +41,7 @@ bool MetadataSetRequest<I>::should_complete(int r) {
 template <typename I>
 void MetadataSetRequest<I>::send_metadata_set() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   CephContext *cct = image_ctx.cct;
   ldout(cct, 20) << this << " " << __func__ << dendl;
index 9f698f7e1aa2b54a70a0b8663c8827b2acd7f0a6..2bb1207007b63e17dca5c22b3db1c21884cc205b 100644 (file)
@@ -40,7 +40,7 @@ public:
 
   int send() override {
     I &image_ctx = this->m_image_ctx;
-    ceph_assert(image_ctx.owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
     CephContext *cct = image_ctx.cct;
 
     if (image_ctx.exclusive_lock != nullptr &&
@@ -62,7 +62,7 @@ private:
 
   void start_async_op() {
     I &image_ctx = this->m_image_ctx;
-    ceph_assert(image_ctx.owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
     CephContext *cct = image_ctx.cct;
     ldout(cct, 10) << dendl;
 
@@ -95,13 +95,13 @@ private:
       return;
     }
 
-    RWLock::RLocker owner_locker(image_ctx.owner_lock);
+    std::shared_lock owner_locker{image_ctx.owner_lock};
     start_async_op();
   }
 
   bool is_within_overlap_bounds() {
     I &image_ctx = this->m_image_ctx;
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
 
     auto overlap = std::min(image_ctx.size, image_ctx.migration_info.overlap);
     return overlap > 0 &&
@@ -110,7 +110,7 @@ private:
 
   void migrate_object() {
     I &image_ctx = this->m_image_ctx;
-    ceph_assert(image_ctx.owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
     CephContext *cct = image_ctx.cct;
 
     auto ctx = create_context_callback<
@@ -158,7 +158,7 @@ private:
 template <typename I>
 void MigrateRequest<I>::send_op() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
   CephContext *cct = image_ctx.cct;
   ldout(cct, 10) << dendl;
 
@@ -182,7 +182,7 @@ template <typename I>
 void MigrateRequest<I>::migrate_objects() {
   I &image_ctx = this->m_image_ctx;
   CephContext *cct = image_ctx.cct;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   uint64_t overlap_objects = get_num_overlap_objects();
 
@@ -219,7 +219,7 @@ uint64_t MigrateRequest<I>::get_num_overlap_objects() {
   CephContext *cct = image_ctx.cct;
   ldout(cct, 10) << dendl;
 
-  RWLock::RLocker image_locker(image_ctx.image_lock);
+  std::shared_lock image_locker{image_ctx.image_lock};
 
   auto overlap = image_ctx.migration_info.overlap;
 
index ce8cced1339087180cbc6921a79459825a044e25..37f9303fc3ba445ffba479e3a976b66cdbc6a462 100644 (file)
@@ -91,7 +91,7 @@ private:
 
   void send_list_snaps() {
     I &image_ctx = this->m_image_ctx;
-    ceph_assert(image_ctx.owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
     ldout(image_ctx.cct, 5) << m_oid
                            << " C_VerifyObjectCallback::send_list_snaps"
                             << dendl;
@@ -107,7 +107,7 @@ private:
 
   uint8_t get_object_state() {
     I &image_ctx = this->m_image_ctx;
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
     for (std::vector<librados::clone_info_t>::const_iterator r =
            m_snap_set.clones.begin(); r != m_snap_set.clones.end(); ++r) {
       librados::snap_t from_snap_id;
@@ -137,7 +137,7 @@ private:
 
   uint64_t next_valid_snap_id(uint64_t snap_id) {
     I &image_ctx = this->m_image_ctx;
-    ceph_assert(image_ctx.image_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
 
     std::map<librados::snap_t, SnapInfo>::iterator it =
       image_ctx.snap_info.lower_bound(snap_id);
@@ -150,13 +150,13 @@ private:
   bool object_map_action(uint8_t new_state) {
     I &image_ctx = this->m_image_ctx;
     CephContext *cct = image_ctx.cct;
-    RWLock::RLocker owner_locker(image_ctx.owner_lock);
+    std::shared_lock owner_locker{image_ctx.owner_lock};
 
     // should have been canceled prior to releasing lock
     ceph_assert(image_ctx.exclusive_lock == nullptr ||
                 image_ctx.exclusive_lock->is_lock_owner());
 
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
     ceph_assert(image_ctx.object_map != nullptr);
 
     uint8_t state = (*image_ctx.object_map)[m_object_no];
@@ -204,7 +204,7 @@ bool ObjectMapIterateRequest<I>::should_complete(int r) {
               << cpp_strerror(r) << dendl;
   }
 
-  RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+  std::shared_lock owner_lock{m_image_ctx.owner_lock};
   switch (m_state) {
   case STATE_VERIFY_OBJECTS:
     if (m_invalidate.test_and_set()) {
@@ -235,13 +235,13 @@ bool ObjectMapIterateRequest<I>::should_complete(int r) {
 
 template <typename I>
 void ObjectMapIterateRequest<I>::send_verify_objects() {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   CephContext *cct = m_image_ctx.cct;
 
   uint64_t snap_id;
   uint64_t num_objects;
   {
-    RWLock::RLocker l(m_image_ctx.image_lock);
+    std::shared_lock l{m_image_ctx.image_lock};
     snap_id = m_image_ctx.snap_id;
     num_objects = Striper::get_num_objects(m_image_ctx.layout,
                                            m_image_ctx.get_image_size(snap_id));
@@ -263,7 +263,7 @@ void ObjectMapIterateRequest<I>::send_verify_objects() {
 
 template <typename I>
 uint64_t ObjectMapIterateRequest<I>::get_image_size() const {
-  ceph_assert(m_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
   if (m_image_ctx.snap_id == CEPH_NOSNAP) {
     if (!m_image_ctx.resize_reqs.empty()) {
       return m_image_ctx.resize_reqs.front()->get_image_size();
@@ -286,8 +286,8 @@ void ObjectMapIterateRequest<I>::send_invalidate_object_map() {
                                             true,
                                             this->create_callback_context());
 
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
-  RWLock::WLocker image_locker(m_image_ctx.image_lock);
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+  std::unique_lock image_locker{m_image_ctx.image_lock};
   req->send();
 }
 
index 2bed8f9ec9fac377cc16512416858ae5aa28dc65..1ee15015dab10a09c2af6e585001213007be49d6 100644 (file)
@@ -34,7 +34,7 @@ bool RebuildObjectMapRequest<I>::should_complete(int r) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << " should_complete: " << " r=" << r << dendl;
 
-  RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+  std::shared_lock owner_lock{m_image_ctx.owner_lock};
   switch (m_state) {
   case STATE_RESIZE_OBJECT_MAP:
     ldout(cct, 5) << "RESIZE_OBJECT_MAP" << dendl;
@@ -93,17 +93,17 @@ bool RebuildObjectMapRequest<I>::should_complete(int r) {
 
 template <typename I>
 void RebuildObjectMapRequest<I>::send_resize_object_map() {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   CephContext *cct = m_image_ctx.cct;
 
-  m_image_ctx.image_lock.get_read();
+  m_image_ctx.image_lock.lock_shared();
   ceph_assert(m_image_ctx.object_map != nullptr);
 
   uint64_t size = get_image_size();
   uint64_t num_objects = Striper::get_num_objects(m_image_ctx.layout, size);
 
   if (m_image_ctx.object_map->size() == num_objects) {
-    m_image_ctx.image_lock.put_read();
+    m_image_ctx.image_lock.unlock_shared();
     send_verify_objects();
     return;
   }
@@ -117,14 +117,14 @@ void RebuildObjectMapRequest<I>::send_resize_object_map() {
 
   m_image_ctx.object_map->aio_resize(size, OBJECT_NONEXISTENT,
                                      this->create_callback_context());
-  m_image_ctx.image_lock.put_read();
+  m_image_ctx.image_lock.unlock_shared();
 }
 
 template <typename I>
 void RebuildObjectMapRequest<I>::send_trim_image() {
   CephContext *cct = m_image_ctx.cct;
 
-  RWLock::RLocker l(m_image_ctx.owner_lock);
+  std::shared_lock l{m_image_ctx.owner_lock};
 
   // should have been canceled prior to releasing lock
   ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
@@ -135,7 +135,7 @@ void RebuildObjectMapRequest<I>::send_trim_image() {
   uint64_t new_size;
   uint64_t orig_size;
   {
-    RWLock::RLocker l(m_image_ctx.image_lock);
+    std::shared_lock l{m_image_ctx.image_lock};
     ceph_assert(m_image_ctx.object_map != nullptr);
 
     new_size = get_image_size();
@@ -173,7 +173,7 @@ bool update_object_map(I& image_ctx, uint64_t object_no, uint8_t current_state,
 
 template <typename I>
 void RebuildObjectMapRequest<I>::send_verify_objects() {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   CephContext *cct = m_image_ctx.cct;
 
   m_state = STATE_VERIFY_OBJECTS;
@@ -189,7 +189,7 @@ void RebuildObjectMapRequest<I>::send_verify_objects() {
 
 template <typename I>
 void RebuildObjectMapRequest<I>::send_save_object_map() {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
   CephContext *cct = m_image_ctx.cct;
 
   ldout(cct, 5) << this << " send_save_object_map" << dendl;
@@ -199,14 +199,14 @@ void RebuildObjectMapRequest<I>::send_save_object_map() {
   ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
               m_image_ctx.exclusive_lock->is_lock_owner());
 
-  RWLock::RLocker image_locker(m_image_ctx.image_lock);
+  std::shared_lock image_locker{m_image_ctx.image_lock};
   ceph_assert(m_image_ctx.object_map != nullptr);
   m_image_ctx.object_map->aio_save(this->create_callback_context());
 }
 
 template <typename I>
 void RebuildObjectMapRequest<I>::send_update_header() {
-  ceph_assert(m_image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
 
   // should have been canceled prior to releasing lock
   ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
@@ -225,13 +225,13 @@ void RebuildObjectMapRequest<I>::send_update_header() {
   ceph_assert(r == 0);
   comp->release();
 
-  RWLock::WLocker image_locker(m_image_ctx.image_lock);
+  std::unique_lock image_locker{m_image_ctx.image_lock};
   m_image_ctx.update_flags(m_image_ctx.snap_id, flags, false);
 }
 
 template <typename I>
 uint64_t RebuildObjectMapRequest<I>::get_image_size() const {
-  ceph_assert(m_image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
   if (m_image_ctx.snap_id == CEPH_NOSNAP) {
     if (!m_image_ctx.resize_reqs.empty()) {
       return m_image_ctx.resize_reqs.front()->get_image_size();
index 823e82dd1412717d92c5b0e18dc9548382ea54f0..f62b549c6b9e8f999a10e03e159b470a12146190 100644 (file)
@@ -81,7 +81,7 @@ bool RenameRequest<I>::should_complete(int r) {
     return true;
   }
 
-  RWLock::RLocker owner_lock(image_ctx.owner_lock);
+  std::shared_lock owner_lock{image_ctx.owner_lock};
   switch (m_state) {
   case STATE_READ_SOURCE_HEADER:
     send_write_destination_header();
@@ -105,7 +105,7 @@ int RenameRequest<I>::filter_return_code(int r) const {
   CephContext *cct = image_ctx.cct;
 
   if (m_state == STATE_READ_SOURCE_HEADER && r == -ENOENT) {
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
     if (image_ctx.name == m_dest_name) {
       // signal that replay raced with itself
       return -EEXIST;
index 3f50acd5abb653b2f15c1ea566199b4fd2be826f..631d8dd5b201385a92961b7174a0cf90dcd73e46 100644 (file)
@@ -21,8 +21,8 @@ Request<I>::Request(I &image_ctx, Context *on_finish, uint64_t journal_op_tid)
 
 template <typename I>
 void Request<I>::send() {
-  I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  [[maybe_unused]] I &image_ctx = this->m_image_ctx;
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   // automatically create the event if we don't need to worry
   // about affecting concurrent IO ops
@@ -74,8 +74,8 @@ template <typename I>
 bool Request<I>::append_op_event() {
   I &image_ctx = this->m_image_ctx;
 
-  ceph_assert(image_ctx.owner_lock.is_locked());
-  RWLock::RLocker image_locker(image_ctx.image_lock);
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
+  std::shared_lock image_locker{image_ctx.image_lock};
   if (image_ctx.journal != nullptr &&
       image_ctx.journal->is_journal_appending()) {
     append_op_event(util::create_context_callback<
@@ -88,7 +88,7 @@ bool Request<I>::append_op_event() {
 template <typename I>
 bool Request<I>::commit_op_event(int r) {
   I &image_ctx = this->m_image_ctx;
-  RWLock::RLocker image_locker(image_ctx.image_lock);
+  std::shared_lock image_locker{image_ctx.image_lock};
 
   if (!m_appended_op_event) {
     return false;
@@ -131,8 +131,8 @@ void Request<I>::handle_commit_op_event(int r, int original_ret_val) {
 template <typename I>
 void Request<I>::replay_op_ready(Context *on_safe) {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
-  ceph_assert(image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
+  ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
   ceph_assert(m_op_tid != 0);
 
   m_appended_op_event = true;
@@ -143,8 +143,8 @@ void Request<I>::replay_op_ready(Context *on_safe) {
 template <typename I>
 void Request<I>::append_op_event(Context *on_safe) {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
-  ceph_assert(image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
+  ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
 
   CephContext *cct = image_ctx.cct;
   ldout(cct, 10) << this << " " << __func__ << dendl;
@@ -170,7 +170,7 @@ void Request<I>::handle_op_event_safe(int r) {
     ceph_assert(!can_affect_io());
 
     // haven't started the request state machine yet
-    RWLock::RLocker owner_locker(image_ctx.owner_lock);
+    std::shared_lock owner_locker{image_ctx.owner_lock};
     send_op();
   }
 }
index c1ca11ddc6dec9e7d3d3eb2fe3dbac9f93fcb9be..e32b49644cebbfd25a4b7c66787154dbd2522a2a 100644 (file)
@@ -38,8 +38,7 @@ protected:
     ImageCtxT &image_ctx = this->m_image_ctx;
 
     ceph_assert(can_affect_io());
-    RWLock::RLocker owner_locker(image_ctx.owner_lock);
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::scoped_lock locker{image_ctx.owner_lock, image_ctx.image_lock};
     if (image_ctx.journal != nullptr) {
       if (image_ctx.journal->is_journal_replaying()) {
         Context *ctx = util::create_context_callback<T, MF>(request);
index 7450bd4bd98df0de879ab4c362e8fc5ce6715805..464dd2af09ce9b302fe19a214b903902ad08389a 100644 (file)
@@ -43,7 +43,7 @@ ResizeRequest<I>::~ResizeRequest() {
   I &image_ctx = this->m_image_ctx;
   ResizeRequest *next_req = NULL;
   {
-    RWLock::WLocker image_locker(image_ctx.image_lock);
+    std::unique_lock image_locker{image_ctx.image_lock};
     ceph_assert(m_xlist_item.remove_myself());
     if (!image_ctx.resize_reqs.empty()) {
       next_req = image_ctx.resize_reqs.front();
@@ -51,7 +51,7 @@ ResizeRequest<I>::~ResizeRequest() {
   }
 
   if (next_req != NULL) {
-    RWLock::RLocker owner_locker(image_ctx.owner_lock);
+    std::shared_lock owner_locker{image_ctx.owner_lock};
     next_req->send();
   }
 }
@@ -59,10 +59,10 @@ ResizeRequest<I>::~ResizeRequest() {
 template <typename I>
 void ResizeRequest<I>::send() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   {
-    RWLock::WLocker image_locker(image_ctx.image_lock);
+    std::unique_lock image_locker{image_ctx.image_lock};
     if (!m_xlist_item.is_on_list()) {
       image_ctx.resize_reqs.push_back(&m_xlist_item);
       if (image_ctx.resize_reqs.front() != this) {
@@ -80,8 +80,8 @@ void ResizeRequest<I>::send() {
 
 template <typename I>
 void ResizeRequest<I>::send_op() {
-  I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  [[maybe_unused]] I &image_ctx = this->m_image_ctx;
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   if (this->is_canceled()) {
     this->async_complete(-ERESTART);
@@ -158,7 +158,7 @@ void ResizeRequest<I>::send_trim_image() {
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << dendl;
 
-  RWLock::RLocker owner_locker(image_ctx.owner_lock);
+  std::shared_lock owner_locker{image_ctx.owner_lock};
   TrimRequest<I> *req = TrimRequest<I>::create(
     image_ctx, create_context_callback<
       ResizeRequest<I>, &ResizeRequest<I>::handle_trim_image>(this),
@@ -191,7 +191,7 @@ void ResizeRequest<I>::send_flush_cache() {
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << dendl;
 
-  RWLock::RLocker owner_locker(image_ctx.owner_lock);
+  std::shared_lock owner_locker{image_ctx.owner_lock};
   auto ctx = create_context_callback<
     ResizeRequest<I>, &ResizeRequest<I>::handle_flush_cache>(this);
   auto aio_comp = io::AioCompletion::create_and_start(
@@ -225,7 +225,7 @@ void ResizeRequest<I>::send_invalidate_cache() {
 
   // need to invalidate since we're deleting objects, and
   // ObjectCacher doesn't track non-existent objects
-  RWLock::RLocker owner_locker(image_ctx.owner_lock);
+  std::shared_lock owner_locker{image_ctx.owner_lock};
   image_ctx.io_object_dispatcher->invalidate_cache(create_context_callback<
     ResizeRequest<I>, &ResizeRequest<I>::handle_invalidate_cache>(this));
 }
@@ -254,7 +254,7 @@ Context *ResizeRequest<I>::send_grow_object_map() {
   I &image_ctx = this->m_image_ctx;
 
   {
-    RWLock::WLocker image_locker(image_ctx.image_lock);
+    std::unique_lock image_locker{image_ctx.image_lock};
     m_shrink_size_visible = true;
   }
 
@@ -267,11 +267,11 @@ Context *ResizeRequest<I>::send_grow_object_map() {
     return nullptr;
   }
 
-  image_ctx.owner_lock.get_read();
-  image_ctx.image_lock.get_read();
+  image_ctx.owner_lock.lock_shared();
+  image_ctx.image_lock.lock_shared();
   if (image_ctx.object_map == nullptr) {
-    image_ctx.image_lock.put_read();
-    image_ctx.owner_lock.put_read();
+    image_ctx.image_lock.unlock_shared();
+    image_ctx.owner_lock.unlock_shared();
 
     // IO is still blocked
     send_update_header();
@@ -288,8 +288,8 @@ Context *ResizeRequest<I>::send_grow_object_map() {
   image_ctx.object_map->aio_resize(
     m_new_size, OBJECT_NONEXISTENT, create_context_callback<
       ResizeRequest<I>, &ResizeRequest<I>::handle_grow_object_map>(this));
-  image_ctx.image_lock.put_read();
-  image_ctx.owner_lock.put_read();
+  image_ctx.image_lock.unlock_shared();
+  image_ctx.owner_lock.unlock_shared();
   return nullptr;
 }
 
@@ -315,11 +315,11 @@ template <typename I>
 Context *ResizeRequest<I>::send_shrink_object_map() {
   I &image_ctx = this->m_image_ctx;
 
-  image_ctx.owner_lock.get_read();
-  image_ctx.image_lock.get_read();
+  image_ctx.owner_lock.lock_shared();
+  image_ctx.image_lock.lock_shared();
   if (image_ctx.object_map == nullptr || m_new_size > m_original_size) {
-    image_ctx.image_lock.put_read();
-    image_ctx.owner_lock.put_read();
+    image_ctx.image_lock.unlock_shared();
+    image_ctx.owner_lock.unlock_shared();
 
     update_size_and_overlap();
     return this->create_context_finisher(0);
@@ -336,8 +336,8 @@ Context *ResizeRequest<I>::send_shrink_object_map() {
   image_ctx.object_map->aio_resize(
     m_new_size, OBJECT_NONEXISTENT, create_context_callback<
       ResizeRequest<I>, &ResizeRequest<I>::handle_shrink_object_map>(this));
-  image_ctx.image_lock.put_read();
-  image_ctx.owner_lock.put_read();
+  image_ctx.image_lock.unlock_shared();
+  image_ctx.owner_lock.unlock_shared();
   return nullptr;
 }
 
@@ -364,7 +364,7 @@ void ResizeRequest<I>::send_post_block_writes() {
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << dendl;
 
-  RWLock::RLocker owner_locker(image_ctx.owner_lock);
+  std::shared_lock owner_locker{image_ctx.owner_lock};
   image_ctx.io_work_queue->block_writes(create_context_callback<
     ResizeRequest<I>, &ResizeRequest<I>::handle_post_block_writes>(this));
 }
@@ -394,7 +394,7 @@ void ResizeRequest<I>::send_update_header() {
                 << "new_size=" << m_new_size << dendl;;
 
   // should have been canceled prior to releasing lock
-  RWLock::RLocker owner_locker(image_ctx.owner_lock);
+  std::shared_lock owner_locker{image_ctx.owner_lock};
   ceph_assert(image_ctx.exclusive_lock == nullptr ||
               image_ctx.exclusive_lock->is_lock_owner());
 
@@ -436,7 +436,7 @@ Context *ResizeRequest<I>::handle_update_header(int *result) {
 template <typename I>
 void ResizeRequest<I>::compute_parent_overlap() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
 
   if (image_ctx.parent == NULL) {
     m_new_parent_overlap = 0;
@@ -449,7 +449,7 @@ template <typename I>
 void ResizeRequest<I>::update_size_and_overlap() {
   I &image_ctx = this->m_image_ctx;
   {
-    RWLock::WLocker image_locker(image_ctx.image_lock);
+    std::unique_lock image_locker{image_ctx.image_lock};
     image_ctx.size = m_new_size;
 
     if (image_ctx.parent != NULL && m_new_size < m_original_size) {
index e6d0e9b9b0ed5fd4588841a379d3a44e635e78c8..763625b4ac917f2c3138e509b5314b83cca71649 100644 (file)
@@ -63,7 +63,7 @@ Context *SnapshotCreateRequest<I>::handle_suspend_requests(int *result) {
 template <typename I>
 void SnapshotCreateRequest<I>::send_suspend_aio() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << dendl;
@@ -158,8 +158,8 @@ void SnapshotCreateRequest<I>::send_create_snap() {
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << dendl;
 
-  RWLock::RLocker owner_locker(image_ctx.owner_lock);
-  RWLock::RLocker image_locker(image_ctx.image_lock);
+  std::shared_lock owner_locker{image_ctx.owner_lock};
+  std::shared_lock image_locker{image_ctx.image_lock};
 
   // should have been canceled prior to releasing lock
   ceph_assert(image_ctx.exclusive_lock == nullptr ||
@@ -207,9 +207,9 @@ template <typename I>
 Context *SnapshotCreateRequest<I>::send_create_object_map() {
   I &image_ctx = this->m_image_ctx;
 
-  image_ctx.image_lock.get_read();
+  image_ctx.image_lock.lock_shared();
   if (image_ctx.object_map == nullptr || m_skip_object_map) {
-    image_ctx.image_lock.put_read();
+    image_ctx.image_lock.unlock_shared();
 
     update_snap_context();
     image_ctx.io_work_queue->unblock_writes();
@@ -223,7 +223,7 @@ Context *SnapshotCreateRequest<I>::send_create_object_map() {
     m_snap_id, create_context_callback<
       SnapshotCreateRequest<I>,
       &SnapshotCreateRequest<I>::handle_create_object_map>(this));
-  image_ctx.image_lock.put_read();
+  image_ctx.image_lock.unlock_shared();
   return nullptr;
 }
 
@@ -276,8 +276,8 @@ template <typename I>
 void SnapshotCreateRequest<I>::update_snap_context() {
   I &image_ctx = this->m_image_ctx;
 
-  RWLock::RLocker owner_locker(image_ctx.owner_lock);
-  RWLock::WLocker image_locker(image_ctx.image_lock);
+  std::shared_lock owner_locker{image_ctx.owner_lock};
+  std::unique_lock image_locker{image_ctx.image_lock};
   if (image_ctx.old_format) {
     return;
   }
index 5e4dce9e32b34cbc47e0ef6f91bddcab3caab4da..17aed5f6a501537711d5dbc97f56e690003a62f0 100644 (file)
@@ -40,13 +40,13 @@ bool SnapshotLimitRequest<I>::should_complete(int r) {
 template <typename I>
 void SnapshotLimitRequest<I>::send_limit_snaps() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << dendl;
 
   {
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
 
     librados::ObjectWriteOperation op;
     cls_client::snapshot_set_limit(&op, m_snap_limit);
index 92197f09dd6bf02ef1dffddab60c857148f04de1..f3b9e7e0b76f3eb0eafe154ba209cd9da15a078f 100644 (file)
@@ -61,7 +61,7 @@ bool SnapshotProtectRequest<I>::should_complete(int r) {
 template <typename I>
 void SnapshotProtectRequest<I>::send_protect_snap() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << dendl;
@@ -76,7 +76,7 @@ void SnapshotProtectRequest<I>::send_protect_snap() {
 template <typename I>
 int SnapshotProtectRequest<I>::verify_and_send_protect_snap() {
   I &image_ctx = this->m_image_ctx;
-  RWLock::RLocker image_locker(image_ctx.image_lock);
+  std::shared_lock image_locker{image_ctx.image_lock};
 
   CephContext *cct = image_ctx.cct;
   if ((image_ctx.features & RBD_FEATURE_LAYERING) == 0) {
index a7088687b4d4052dacb9629c64f1f2b31df371f8..e120aea4ae801263886faccac7215a14ab048b6c 100644 (file)
@@ -4,6 +4,7 @@
 #include "librbd/operation/SnapshotRemoveRequest.h"
 #include "common/dout.h"
 #include "common/errno.h"
+#include "include/ceph_assert.h"
 #include "cls/rbd/cls_rbd_client.h"
 #include "librbd/ExclusiveLock.h"
 #include "librbd/ImageCtx.h"
@@ -36,9 +37,9 @@ void SnapshotRemoveRequest<I>::send_op() {
   I &image_ctx = this->m_image_ctx;
   CephContext *cct = image_ctx.cct;
 
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
   {
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
     if (image_ctx.snap_info.find(m_snap_id) == image_ctx.snap_info.end()) {
       lderr(cct) << "snapshot doesn't exist" << dendl;
       this->async_complete(-ENOENT);
@@ -158,7 +159,7 @@ void SnapshotRemoveRequest<I>::detach_child() {
 
   bool detach_child = false;
   {
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
 
     cls::rbd::ParentImageSpec our_pspec;
     int r = image_ctx.get_parent_spec(m_snap_id, &our_pspec);
@@ -223,8 +224,8 @@ void SnapshotRemoveRequest<I>::remove_object_map() {
   CephContext *cct = image_ctx.cct;
 
   {
-    RWLock::RLocker owner_lock(image_ctx.owner_lock);
-    RWLock::WLocker image_locker(image_ctx.image_lock);
+    std::shared_lock owner_lock{image_ctx.owner_lock};
+    std::unique_lock image_locker{image_ctx.image_lock};
     if (image_ctx.object_map != nullptr) {
       ldout(cct, 5) << dendl;
 
@@ -330,7 +331,7 @@ void SnapshotRemoveRequest<I>::remove_snap_context() {
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << dendl;
 
-  RWLock::WLocker image_locker(image_ctx.image_lock);
+  std::unique_lock image_locker{image_ctx.image_lock};
   image_ctx.rm_snap(m_snap_namespace, m_snap_name, m_snap_id);
 }
 
@@ -338,7 +339,7 @@ template <typename I>
 int SnapshotRemoveRequest<I>::scan_for_parents(
     cls::rbd::ParentImageSpec &pspec) {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
 
   if (pspec.pool_id != -1) {
     map<uint64_t, SnapInfo>::iterator it;
index 973b306b0811019525fcc80d85436d160d4ac355..a957074b6794103553348ad040b49d24c865d6e3 100644 (file)
@@ -41,7 +41,7 @@ SnapshotRenameRequest<I>::SnapshotRenameRequest(I &image_ctx,
 template <typename I>
 journal::Event SnapshotRenameRequest<I>::create_event(uint64_t op_tid) const {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.image_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
 
   std::string src_snap_name;
   auto snap_info_it = image_ctx.snap_info.find(m_snap_id);
@@ -77,8 +77,8 @@ bool SnapshotRenameRequest<I>::should_complete(int r) {
 template <typename I>
 void SnapshotRenameRequest<I>::send_rename_snap() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
-  RWLock::RLocker image_locker(image_ctx.image_lock);
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
+  std::shared_lock image_locker{image_ctx.image_lock};
 
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << dendl;
index fa4725231bc8c862fe27928519a8f1723aade3c3..e718259b5fbc69902e46f467d3cd1173b4dd5bfb 100644 (file)
@@ -47,7 +47,7 @@ public:
                    << m_object_num << dendl;
 
     {
-      RWLock::RLocker image_locker(image_ctx.image_lock);
+      std::shared_lock image_locker{image_ctx.image_lock};
       if (m_object_num < m_head_num_objects &&
           m_snap_object_map != nullptr &&
           !image_ctx.object_map->object_may_exist(m_object_num) &&
@@ -139,8 +139,8 @@ void SnapshotRollbackRequest<I>::send_resize_image() {
 
   uint64_t current_size;
   {
-    RWLock::RLocker owner_locker(image_ctx.owner_lock);
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock owner_locker{image_ctx.owner_lock};
+    std::shared_lock image_locker{image_ctx.image_lock};
     current_size = image_ctx.get_image_size(CEPH_NOSNAP);
   }
 
@@ -154,7 +154,7 @@ void SnapshotRollbackRequest<I>::send_resize_image() {
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << dendl;
 
-  RWLock::RLocker owner_locker(image_ctx.owner_lock);
+  std::shared_lock owner_locker{image_ctx.owner_lock};
   Context *ctx = create_context_callback<
     SnapshotRollbackRequest<I>,
     &SnapshotRollbackRequest<I>::handle_resize_image>(this);
@@ -187,8 +187,8 @@ void SnapshotRollbackRequest<I>::send_get_snap_object_map() {
   bool object_map_enabled;
   CephContext *cct = image_ctx.cct;
   {
-    RWLock::RLocker owner_locker(image_ctx.owner_lock);
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock owner_locker{image_ctx.owner_lock};
+    std::shared_lock image_locker{image_ctx.image_lock};
     object_map_enabled = (image_ctx.object_map != nullptr);
     int r = image_ctx.get_flags(m_snap_id, &flags);
     if (r < 0) {
@@ -238,8 +238,8 @@ void SnapshotRollbackRequest<I>::send_rollback_object_map() {
   I &image_ctx = this->m_image_ctx;
 
   {
-    RWLock::RLocker owner_locker(image_ctx.owner_lock);
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock owner_locker{image_ctx.owner_lock};
+    std::shared_lock image_locker{image_ctx.image_lock};
     if (image_ctx.object_map != nullptr) {
       CephContext *cct = image_ctx.cct;
       ldout(cct, 5) << this << " " << __func__ << dendl;
@@ -280,10 +280,10 @@ void SnapshotRollbackRequest<I>::send_rollback_objects() {
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << dendl;
 
-  RWLock::RLocker owner_locker(image_ctx.owner_lock);
+  std::shared_lock owner_locker{image_ctx.owner_lock};
   uint64_t num_objects;
   {
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
     num_objects = Striper::get_num_objects(image_ctx.layout,
                                            image_ctx.get_current_size());
   }
@@ -325,8 +325,8 @@ Context *SnapshotRollbackRequest<I>::send_refresh_object_map() {
 
   bool object_map_enabled;
   {
-    RWLock::RLocker owner_locker(image_ctx.owner_lock);
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock owner_locker{image_ctx.owner_lock};
+    std::shared_lock image_locker{image_ctx.image_lock};
     object_map_enabled = (image_ctx.object_map != nullptr);
   }
   if (!object_map_enabled) {
@@ -373,7 +373,7 @@ Context *SnapshotRollbackRequest<I>::send_invalidate_cache() {
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << dendl;
 
-  RWLock::RLocker owner_lock(image_ctx.owner_lock);
+  std::shared_lock owner_lock{image_ctx.owner_lock};
   Context *ctx = create_context_callback<
     SnapshotRollbackRequest<I>,
     &SnapshotRollbackRequest<I>::handle_invalidate_cache>(this);
@@ -398,8 +398,8 @@ template <typename I>
 void SnapshotRollbackRequest<I>::apply() {
   I &image_ctx = this->m_image_ctx;
 
-  RWLock::RLocker owner_locker(image_ctx.owner_lock);
-  RWLock::WLocker image_locker(image_ctx.image_lock);
+  std::shared_lock owner_locker{image_ctx.owner_lock};
+  std::unique_lock image_locker{image_ctx.image_lock};
   if (image_ctx.object_map != nullptr) {
     std::swap(m_object_map, image_ctx.object_map);
   }
index 12e6383455f82500aeb96512b47bf8f50e41da64..76caf68f335e8bdc00c533512f0daca00f34db69 100644 (file)
@@ -64,7 +64,7 @@ public:
 
   int send() override {
     I &image_ctx = this->m_image_ctx;
-    ceph_assert(image_ctx.owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
     CephContext *cct = image_ctx.cct;
     ldout(cct, 10) << this << " scanning pool '" << m_pool.second << "'"
@@ -183,7 +183,7 @@ bool SnapshotUnprotectRequest<I>::should_complete(int r) {
     return should_complete_error();
   }
 
-  RWLock::RLocker owner_lock(image_ctx.owner_lock);
+  std::shared_lock owner_lock{image_ctx.owner_lock};
   bool finished = false;
   switch (m_state) {
   case STATE_UNPROTECT_SNAP_START:
@@ -205,7 +205,7 @@ bool SnapshotUnprotectRequest<I>::should_complete(int r) {
 template <typename I>
 bool SnapshotUnprotectRequest<I>::should_complete_error() {
   I &image_ctx = this->m_image_ctx;
-  RWLock::RLocker owner_locker(image_ctx.owner_lock);
+  std::shared_lock owner_locker{image_ctx.owner_lock};
   CephContext *cct = image_ctx.cct;
   lderr(cct) << this << " " << __func__ << ": "
              << "ret_val=" << m_ret_val << dendl;
@@ -222,7 +222,7 @@ bool SnapshotUnprotectRequest<I>::should_complete_error() {
 template <typename I>
 void SnapshotUnprotectRequest<I>::send_unprotect_snap_start() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << dendl;
@@ -237,7 +237,7 @@ void SnapshotUnprotectRequest<I>::send_unprotect_snap_start() {
 template <typename I>
 void SnapshotUnprotectRequest<I>::send_scan_pool_children() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << dendl;
@@ -270,7 +270,7 @@ void SnapshotUnprotectRequest<I>::send_scan_pool_children() {
 template <typename I>
 void SnapshotUnprotectRequest<I>::send_unprotect_snap_finish() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << dendl;
@@ -290,7 +290,7 @@ void SnapshotUnprotectRequest<I>::send_unprotect_snap_finish() {
 template <typename I>
 void SnapshotUnprotectRequest<I>::send_unprotect_snap_rollback() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << this << " " << __func__ << dendl;
@@ -310,7 +310,7 @@ void SnapshotUnprotectRequest<I>::send_unprotect_snap_rollback() {
 template <typename I>
 int SnapshotUnprotectRequest<I>::verify_and_send_unprotect_snap_start() {
   I &image_ctx = this->m_image_ctx;
-  RWLock::RLocker image_locker(image_ctx.image_lock);
+  std::shared_lock image_locker{image_ctx.image_lock};
 
   CephContext *cct = image_ctx.cct;
   if ((image_ctx.features & RBD_FEATURE_LAYERING) == 0) {
index 537a0ca41eb88f7585b78276bcb0a7917d58e2d8..0c5a4efab4b67e2b99c698b8ee6ab2a1a3f0012c 100644 (file)
@@ -120,7 +120,7 @@ public:
 
   int send() override {
     I &image_ctx = this->m_image_ctx;
-    ceph_assert(image_ctx.owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
     ldout(m_cct, 20) << dendl;
 
@@ -131,7 +131,7 @@ public:
     }
 
     {
-      RWLock::RLocker image_locker(image_ctx.image_lock);
+      std::shared_lock image_locker{image_ctx.image_lock};
       if (image_ctx.object_map != nullptr &&
           !image_ctx.object_map->object_may_exist(m_object_no)) {
         // can skip because the object does not exist
@@ -204,14 +204,14 @@ public:
 
     ldout(m_cct, 20) << dendl;
 
-    image_ctx.owner_lock.get_read();
-    image_ctx.image_lock.get_read();
+    image_ctx.owner_lock.lock_shared();
+    image_ctx.image_lock.lock_shared();
     if (image_ctx.object_map == nullptr) {
       // possible that exclusive lock was lost in background
       lderr(m_cct) << "object map is not initialized" << dendl;
 
-      image_ctx.image_lock.put_read();
-      image_ctx.owner_lock.put_read();
+      image_ctx.image_lock.unlock_shared();
+      image_ctx.owner_lock.unlock_shared();
       finish_op(-EINVAL);
       return;
     }
@@ -220,8 +220,8 @@ public:
     m_finish_op_ctx = image_ctx.exclusive_lock->start_op(&r);
     if (m_finish_op_ctx == nullptr) {
       lderr(m_cct) << "lost exclusive lock" << dendl;
-      image_ctx.image_lock.put_read();
-      image_ctx.owner_lock.put_read();
+      image_ctx.image_lock.unlock_shared();
+      image_ctx.owner_lock.unlock_shared();
       finish_op(r);
       return;
     }
@@ -235,8 +235,8 @@ public:
                                    OBJECT_EXISTS, {}, false, ctx);
 
     // NOTE: state machine might complete before we reach here
-    image_ctx.image_lock.put_read();
-    image_ctx.owner_lock.put_read();
+    image_ctx.image_lock.unlock_shared();
+    image_ctx.owner_lock.unlock_shared();
     if (!sent) {
       finish_op(0);
     }
@@ -296,8 +296,8 @@ public:
       &C_SparsifyObject<I>::handle_post_update_object_map>(this);
     bool sent;
     {
-      RWLock::RLocker owner_locker(image_ctx.owner_lock);
-      RWLock::RLocker image_locker(image_ctx.image_lock);
+      std::shared_lock owner_locker{image_ctx.owner_lock};
+      std::shared_lock image_locker{image_ctx.image_lock};
 
       assert(image_ctx.exclusive_lock->is_lock_owner());
       assert(image_ctx.object_map != nullptr);
@@ -459,16 +459,16 @@ void SparsifyRequest<I>::send_op() {
 template <typename I>
 void SparsifyRequest<I>::sparsify_objects() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   CephContext *cct = image_ctx.cct;
   ldout(cct, 5) << dendl;
 
-  assert(image_ctx.owner_lock.is_locked());
+  assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   uint64_t objects = 0;
   {
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
     objects = image_ctx.get_object_count(CEPH_NOSNAP);
   }
 
index ab7f31192092ca69a6db56552b5793d64a791bc0..f1a34a5592bf00b11534c2444446ebb22d9bc6f1 100644 (file)
@@ -39,7 +39,7 @@ public:
 
   int send() override {
     I &image_ctx = this->m_image_ctx;
-    ceph_assert(image_ctx.owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
     ceph_assert(image_ctx.exclusive_lock == nullptr ||
                 image_ctx.exclusive_lock->is_lock_owner());
 
@@ -69,12 +69,12 @@ public:
 
   int send() override {
     I &image_ctx = this->m_image_ctx;
-    ceph_assert(image_ctx.owner_lock.is_locked());
+    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
     ceph_assert(image_ctx.exclusive_lock == nullptr ||
                 image_ctx.exclusive_lock->is_lock_owner());
 
     {
-      RWLock::RLocker image_locker(image_ctx.image_lock);
+      std::shared_lock image_locker{image_ctx.image_lock};
       if (image_ctx.object_map != nullptr &&
           !image_ctx.object_map->object_may_exist(m_object_no)) {
         return 1;
@@ -133,7 +133,7 @@ bool TrimRequest<I>::should_complete(int r)
     return true;
   }
 
-  RWLock::RLocker owner_lock(image_ctx.owner_lock);
+  std::shared_lock owner_lock{image_ctx.owner_lock};
   switch (m_state) {
   case STATE_PRE_TRIM:
     ldout(cct, 5) << " PRE_TRIM" << dendl;
@@ -180,7 +180,7 @@ void TrimRequest<I>::send() {
 template<typename I>
 void TrimRequest<I>::send_pre_trim() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   if (m_delete_start >= m_num_objects) {
     send_clean_boundary();
@@ -188,7 +188,7 @@ void TrimRequest<I>::send_pre_trim() {
   }
 
   {
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
     if (image_ctx.object_map != nullptr) {
       ldout(image_ctx.cct, 5) << this << " send_pre_trim: "
                               << " delete_start_min=" << m_delete_start_min
@@ -211,13 +211,13 @@ void TrimRequest<I>::send_pre_trim() {
 template<typename I>
 void TrimRequest<I>::send_copyup_objects() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   ::SnapContext snapc;
   bool has_snapshots;
   uint64_t parent_overlap;
   {
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
 
     snapc = image_ctx.snapc;
     has_snapshots = !image_ctx.snaps.empty();
@@ -258,7 +258,7 @@ void TrimRequest<I>::send_copyup_objects() {
 template <typename I>
 void TrimRequest<I>::send_remove_objects() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   ldout(image_ctx.cct, 5) << this << " send_remove_objects: "
                            << " delete_start=" << m_delete_start
@@ -279,10 +279,10 @@ void TrimRequest<I>::send_remove_objects() {
 template<typename I>
 void TrimRequest<I>::send_post_trim() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
 
   {
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
     if (image_ctx.object_map != nullptr) {
       ldout(image_ctx.cct, 5) << this << " send_post_trim:"
                               << " delete_start_min=" << m_delete_start_min
@@ -305,7 +305,7 @@ void TrimRequest<I>::send_post_trim() {
 template <typename I>
 void TrimRequest<I>::send_clean_boundary() {
   I &image_ctx = this->m_image_ctx;
-  ceph_assert(image_ctx.owner_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
   CephContext *cct = image_ctx.cct;
   if (m_delete_off <= m_new_size) {
     send_finish(0);
@@ -323,7 +323,7 @@ void TrimRequest<I>::send_clean_boundary() {
 
   ::SnapContext snapc;
   {
-    RWLock::RLocker image_locker(image_ctx.image_lock);
+    std::shared_lock image_locker{image_ctx.image_lock};
     snapc = image_ctx.snapc;
   }
 
index dfb95aec0b850972f676423ca5dcb80666caa92e..2715f25926261753d422b36fd89df1f78038f007 100644 (file)
@@ -38,18 +38,18 @@ void Notifier::C_AioNotify::finish(int r) {
 
 Notifier::Notifier(ContextWQ *work_queue, IoCtx &ioctx, const std::string &oid)
   : m_work_queue(work_queue), m_ioctx(ioctx), m_oid(oid),
-    m_aio_notify_lock(util::unique_lock_name(
-      "librbd::object_watcher::Notifier::m_aio_notify_lock", this)) {
+    m_aio_notify_lock(ceph::make_mutex(util::unique_lock_name(
+      "librbd::object_watcher::Notifier::m_aio_notify_lock", this))) {
   m_cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
 }
 
 Notifier::~Notifier() {
-  Mutex::Locker aio_notify_locker(m_aio_notify_lock);
+  std::lock_guard aio_notify_locker{m_aio_notify_lock};
   ceph_assert(m_pending_aio_notifies == 0);
 }
 
 void Notifier::flush(Context *on_finish) {
-  Mutex::Locker aio_notify_locker(m_aio_notify_lock);
+  std::lock_guard aio_notify_locker{m_aio_notify_lock};
   if (m_pending_aio_notifies == 0) {
     m_work_queue->queue(on_finish, 0);
     return;
@@ -61,7 +61,7 @@ void Notifier::flush(Context *on_finish) {
 void Notifier::notify(bufferlist &bl, NotifyResponse *response,
                       Context *on_finish) {
   {
-    Mutex::Locker aio_notify_locker(m_aio_notify_lock);
+    std::lock_guard aio_notify_locker{m_aio_notify_lock};
     ++m_pending_aio_notifies;
 
     ldout(m_cct, 20) << "pending=" << m_pending_aio_notifies << dendl;
@@ -77,7 +77,7 @@ void Notifier::notify(bufferlist &bl, NotifyResponse *response,
 void Notifier::handle_notify(int r, Context *on_finish) {
   ldout(m_cct, 20) << "r=" << r << dendl;
 
-  Mutex::Locker aio_notify_locker(m_aio_notify_lock);
+  std::lock_guard aio_notify_locker{m_aio_notify_lock};
   ceph_assert(m_pending_aio_notifies > 0);
   --m_pending_aio_notifies;
 
index 8b0ad37b4d3d08c4bfd4c3fe23322affd487f803..5bfb10399032fafcdf77f6886a0529ddb4c011dc 100644 (file)
@@ -8,7 +8,7 @@
 #include "include/buffer_fwd.h"
 #include "include/Context.h"
 #include "include/rados/librados.hpp"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
 #include "common/WorkQueue.h"
 #include <list>
 
@@ -49,7 +49,7 @@ private:
   CephContext *m_cct;
   std::string m_oid;
 
-  Mutex m_aio_notify_lock;
+  ceph::mutex m_aio_notify_lock;
   size_t m_pending_aio_notifies = 0;
   Contexts m_aio_notify_flush_ctxs;
 
index 40c3dfe7dd281c95ebf474d20f2a0b780eca2d02..cf44cf385cb3a34df375489ab6e72b852bfa8af0 100644 (file)
@@ -2,7 +2,7 @@
 // vim: ts=8 sw=2 smarttab
 
 #include "librbd/watcher/RewatchRequest.h"
-#include "common/RWLock.h"
+#include "common/ceph_mutex.h"
 #include "common/errno.h"
 #include "librbd/Utils.h"
 
@@ -21,7 +21,7 @@ namespace watcher {
 using std::string;
 
 RewatchRequest::RewatchRequest(librados::IoCtx& ioctx, const string& oid,
-                               RWLock &watch_lock,
+                               ceph::shared_mutex &watch_lock,
                                librados::WatchCtx2 *watch_ctx,
                                uint64_t *watch_handle, Context *on_finish)
   : m_ioctx(ioctx), m_oid(oid), m_watch_lock(watch_lock),
@@ -34,7 +34,7 @@ void RewatchRequest::send() {
 }
 
 void RewatchRequest::unwatch() {
-  ceph_assert(m_watch_lock.is_wlocked());
+  ceph_assert(ceph_mutex_is_wlocked(m_watch_lock));
   if (*m_watch_handle == 0) {
     rewatch();
     return;
@@ -88,7 +88,7 @@ void RewatchRequest::handle_rewatch(int r) {
   }
 
   {
-    RWLock::WLocker watch_locker(m_watch_lock);
+    std::unique_lock watch_locker{m_watch_lock};
     *m_watch_handle = m_rewatch_handle;
   }
 
index d4fc250abec7ebd39d284a2bb5cac0d13a88fe15..ce5e31539e4221a4aba205846c88a400e1c7da09 100644 (file)
@@ -4,11 +4,11 @@
 #ifndef CEPH_LIBRBD_WATCHER_REWATCH_REQUEST_H
 #define CEPH_LIBRBD_WATCHER_REWATCH_REQUEST_H
 
+#include "common/ceph_mutex.h"
 #include "include/int_types.h"
 #include "include/rados/librados.hpp"
 
 struct Context;
-struct RWLock;
 
 namespace librbd {
 
@@ -18,7 +18,7 @@ class RewatchRequest {
 public:
 
   static RewatchRequest *create(librados::IoCtx& ioctx, const std::string& oid,
-                                RWLock &watch_lock,
+                                ceph::shared_mutex &watch_lock,
                                 librados::WatchCtx2 *watch_ctx,
                                 uint64_t *watch_handle, Context *on_finish) {
     return new RewatchRequest(ioctx, oid, watch_lock, watch_ctx, watch_handle,
@@ -26,7 +26,7 @@ public:
   }
 
   RewatchRequest(librados::IoCtx& ioctx, const std::string& oid,
-                 RWLock &watch_lock, librados::WatchCtx2 *watch_ctx,
+                 ceph::shared_mutex &watch_lock, librados::WatchCtx2 *watch_ctx,
                  uint64_t *watch_handle, Context *on_finish);
 
   void send();
@@ -53,7 +53,7 @@ private:
 
   librados::IoCtx& m_ioctx;
   std::string m_oid;
-  RWLock &m_watch_lock;
+  ceph::shared_mutex &m_watch_lock;
   librados::WatchCtx2 *m_watch_ctx;
   uint64_t *m_watch_handle;
   Context *m_on_finish;