const AsyncRequest<T>* async_request, T &image_ctx,
const ContextFactory& context_factory, Context *ctx,
ProgressContext *prog_ctx, uint64_t object_no, uint64_t end_object_no)
- : m_lock(util::unique_lock_name("librbd::AsyncThrottle::m_lock", this)),
+ : m_lock(ceph::make_mutex(
+ util::unique_lock_name("librbd::AsyncThrottle::m_lock", this))),
m_async_request(async_request), m_image_ctx(image_ctx),
m_context_factory(context_factory), m_ctx(ctx), m_prog_ctx(prog_ctx),
m_object_no(object_no), m_end_object_no(end_object_no), m_current_ops(0),
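The first hunk shows the recurring construction pattern of this patch: the mutex keeps its lockdep name, but the name is now routed through ceph::make_mutex in the initializer list instead of the removed Mutex constructor. A minimal standalone sketch of the same shape, using std::mutex (what the ceph::mutex alias resolves to in release builds) and a hypothetical make_named_mutex stand-in for ceph::make_mutex:

```cpp
#include <mutex>
#include <string>

// Hypothetical stand-in for ceph::make_mutex: the name is only consumed
// by the debug/lockdep build; returning the mutex by value relies on
// C++17 guaranteed copy elision, as ceph::make_mutex itself does.
inline std::mutex make_named_mutex(const std::string& /*name*/) {
  return {};
}

struct ThrottleLike {
  explicit ThrottleLike(const std::string& id)
    : m_lock(make_named_mutex("ThrottleLike::m_lock " + id)) {}
  std::mutex m_lock;
};
```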
template <typename T>
void AsyncObjectThrottle<T>::start_ops(uint64_t max_concurrent) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
bool complete;
{
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
for (uint64_t i = 0; i < max_concurrent; ++i) {
start_next_op();
if (m_ret < 0 && m_current_ops == 0) {
void AsyncObjectThrottle<T>::finish_op(int r) {
bool complete;
{
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- Mutex::Locker locker(m_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
+ std::lock_guard locker{m_lock};
--m_current_ops;
if (r < 0 && r != -ENOENT && m_ret == 0) {
m_ret = r;
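The finish_op() hunk also illustrates the lock-nesting convention the conversion preserves: the shared (read) side of owner_lock is taken before the exclusive inner mutex, and both guards use brace initialization. A sketch with plain standard types, assuming the release-build aliases; Ctx and its members are illustrative:

```cpp
#include <mutex>
#include <shared_mutex>

struct Ctx {
  std::shared_mutex owner_lock;  // read-mostly outer lock
  std::mutex m_lock;             // exclusive inner lock
  int m_current_ops = 0;

  void finish_op() {
    std::shared_lock owner_locker{owner_lock};  // outer lock first...
    std::lock_guard locker{m_lock};             // ...then the inner one
    --m_current_ops;
  }
};
```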
void finish_op(int r) override;
private:
- Mutex m_lock;
+ ceph::mutex m_lock;
const AsyncRequest<ImageCtxT> *m_async_request;
ImageCtxT &m_image_ctx;
ContextFactory m_context_factory;
template <typename T>
void AsyncRequest<T>::start_request() {
- Mutex::Locker async_ops_locker(m_image_ctx.async_ops_lock);
+ std::lock_guard async_ops_locker{m_image_ctx.async_ops_lock};
m_image_ctx.async_requests.push_back(&m_xlist_item);
}
void AsyncRequest<T>::finish_request() {
decltype(m_image_ctx.async_requests_waiters) waiters;
{
- Mutex::Locker async_ops_locker(m_image_ctx.async_ops_lock);
+ std::lock_guard async_ops_locker{m_image_ctx.async_ops_lock};
ceph_assert(m_xlist_item.remove_myself());
if (m_image_ctx.async_requests.empty()) {
#include "include/int_types.h"
#include "common/dout.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/set.hpp>
#include <deque>
typedef std::list<BlockOperation> BlockOperations;
BlockGuard(CephContext *cct)
- : m_cct(cct), m_lock("librbd::BlockGuard::m_lock") {
+ : m_cct(cct) {
}
BlockGuard(const BlockGuard&) = delete;
*/
int detain(const BlockExtent &block_extent, BlockOperation *block_operation,
BlockGuardCell **cell) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ldout(m_cct, 20) << "block_start=" << block_extent.block_start << ", "
<< "block_end=" << block_extent.block_end << ", "
<< "free_slots=" << m_free_detained_block_extents.size()
* Release any detained IO operations from the provided cell.
*/
void release(BlockGuardCell *cell, BlockOperations *block_operations) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(cell != nullptr);
auto &detained_block_extent = reinterpret_cast<DetainedBlockExtent &>(
CephContext *m_cct;
- Mutex m_lock;
+ ceph::mutex m_lock = ceph::make_mutex("librbd::BlockGuard::m_lock");
DetainedBlockExtentsPool m_detained_block_extent_pool;
DetainedBlockExtents m_free_detained_block_extents;
BlockExtentToDetainedBlockExtents m_detained_block_extents;
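BlockGuard takes a slightly different route than the constructor-initializer hunks: the lock name moves into a default member initializer, so the constructor no longer mentions the lock at all. A sketch of the shape (BlockGuardLike is illustrative; with ceph::mutex the initializer is the make_mutex call shown in the hunk):

```cpp
#include <mutex>

class BlockGuardLike {
 public:
  explicit BlockGuardLike(int cct) : m_cct(cct) {}  // no m_lock entry needed
 private:
  int m_cct;
  // Default member initializer keeps the declaration and its (debug-only)
  // name together; the Ceph version reads
  //   ceph::mutex m_lock = ceph::make_mutex("librbd::BlockGuard::m_lock");
  std::mutex m_lock;
};
```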
m_object_number(object_number), m_work_queue(work_queue),
m_snap_seqs(snap_seqs), m_prog_ctx(prog_ctx), m_on_finish(on_finish),
m_cct(dst_image_ctx->cct),
- m_lock(unique_lock_name("DeepCopyRequest::m_lock", this)) {
+ m_lock(ceph::make_mutex(unique_lock_name("DeepCopyRequest::m_lock", this))) {
}
template <typename I>
template <typename I>
void DeepCopyRequest<I>::cancel() {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ldout(m_cct, 20) << dendl;
template <typename I>
void DeepCopyRequest<I>::send_copy_snapshots() {
- m_lock.Lock();
+ m_lock.lock();
if (m_canceled) {
- m_lock.Unlock();
+ m_lock.unlock();
finish(-ECANCELED);
return;
}
m_src_image_ctx, m_dst_image_ctx, m_snap_id_end, m_flatten, m_work_queue,
m_snap_seqs, ctx);
m_snapshot_copy_request->get();
- m_lock.Unlock();
+ m_lock.unlock();
m_snapshot_copy_request->send();
}
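send_copy_snapshots() keeps the explicit lock()/unlock() pairing (now lowercase) because the request must be issued after the lock is dropped. For reference only, the same control flow can be written with std::unique_lock so every exit path releases the lock automatically; this is a sketch of an alternative, not what the patch does, and all names are illustrative:

```cpp
#include <mutex>

std::mutex m_lock;
bool m_canceled = false;

bool start_unless_canceled() {
  std::unique_lock locker{m_lock};
  if (m_canceled) {
    locker.unlock();  // release before completing with -ECANCELED
    return false;
  }
  // ... build the request while still holding the lock ...
  locker.unlock();    // release before the asynchronous send()
  // request->send();
  return true;
}
```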
ldout(m_cct, 20) << "r=" << r << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_snapshot_copy_request->put();
m_snapshot_copy_request = nullptr;
if (r == 0 && m_canceled) {
template <typename I>
void DeepCopyRequest<I>::send_copy_image() {
- m_lock.Lock();
+ m_lock.lock();
if (m_canceled) {
- m_lock.Unlock();
+ m_lock.unlock();
finish(-ECANCELED);
return;
}
m_src_image_ctx, m_dst_image_ctx, m_snap_id_start, m_snap_id_end,
m_flatten, m_object_number, *m_snap_seqs, m_prog_ctx, ctx);
m_image_copy_request->get();
- m_lock.Unlock();
+ m_lock.unlock();
m_image_copy_request->send();
}
ldout(m_cct, 20) << "r=" << r << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_image_copy_request->put();
m_image_copy_request = nullptr;
if (r == 0 && m_canceled) {
template <typename I>
void DeepCopyRequest<I>::send_copy_object_map() {
- m_dst_image_ctx->owner_lock.get_read();
- m_dst_image_ctx->image_lock.get_read();
+ m_dst_image_ctx->owner_lock.lock_shared();
+ m_dst_image_ctx->image_lock.lock_shared();
if (!m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP,
m_dst_image_ctx->image_lock)) {
- m_dst_image_ctx->image_lock.put_read();
- m_dst_image_ctx->owner_lock.put_read();
+ m_dst_image_ctx->image_lock.unlock_shared();
+ m_dst_image_ctx->owner_lock.unlock_shared();
send_copy_metadata();
return;
}
if (m_snap_id_end == CEPH_NOSNAP) {
- m_dst_image_ctx->image_lock.put_read();
- m_dst_image_ctx->owner_lock.put_read();
+ m_dst_image_ctx->image_lock.unlock_shared();
+ m_dst_image_ctx->owner_lock.unlock_shared();
send_refresh_object_map();
return;
}
}
if (finish_op_ctx == nullptr) {
lderr(m_cct) << "lost exclusive lock" << dendl;
- m_dst_image_ctx->image_lock.put_read();
- m_dst_image_ctx->owner_lock.put_read();
+ m_dst_image_ctx->image_lock.unlock_shared();
+ m_dst_image_ctx->owner_lock.unlock_shared();
finish(r);
return;
}
ceph_assert(m_snap_seqs->count(m_snap_id_end) > 0);
librados::snap_t copy_snap_id = (*m_snap_seqs)[m_snap_id_end];
m_dst_image_ctx->object_map->rollback(copy_snap_id, ctx);
- m_dst_image_ctx->image_lock.put_read();
- m_dst_image_ctx->owner_lock.put_read();
+ m_dst_image_ctx->image_lock.unlock_shared();
+ m_dst_image_ctx->owner_lock.unlock_shared();
}
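send_copy_object_map() ends up with four separate unlock_shared() sites, one per early return. Again for reference, a pair of std::shared_lock guards would collapse these; a sketch under the same release-build assumption, with illustrative names and flags:

```cpp
#include <shared_mutex>

std::shared_mutex owner_lock, image_lock;

void copy_object_map(bool have_object_map, bool is_head) {
  std::shared_lock owner_locker{owner_lock};
  std::shared_lock image_locker{image_lock};
  if (!have_object_map) {
    return;  // both read locks released by the guards
  }
  if (is_head) {
    return;
  }
  // ... roll back the object map under the shared locks ...
}
```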
template <typename I>
int r;
Context *finish_op_ctx = nullptr;
{
- RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
if (m_dst_image_ctx->exclusive_lock != nullptr) {
finish_op_ctx = m_dst_image_ctx->exclusive_lock->start_op(&r);
}
}
{
- RWLock::WLocker image_locker(m_dst_image_ctx->image_lock);
+ std::unique_lock image_locker{m_dst_image_ctx->image_lock};
std::swap(m_dst_image_ctx->object_map, m_object_map);
}
delete m_object_map;
template <typename I>
int DeepCopyRequest<I>::validate_copy_points() {
- RWLock::RLocker image_locker(m_src_image_ctx->image_lock);
+ std::shared_lock image_locker{m_src_image_ctx->image_lock};
if (m_snap_id_start != 0 &&
m_src_image_ctx->snap_info.find(m_snap_id_start) ==
#ifndef CEPH_LIBRBD_DEEP_COPY_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_REQUEST_H
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "common/RefCountedObj.h"
#include "include/int_types.h"
#include "librbd/ImageCtx.h"
Context *m_on_finish;
CephContext *m_cct;
- Mutex m_lock;
+ ceph::mutex m_lock;
bool m_canceled = false;
deep_copy::SnapshotCopyRequest<ImageCtxT> *m_snapshot_copy_request = nullptr;
#include "librbd/exclusive_lock/PreReleaseRequest.h"
#include "librbd/io/ImageRequestWQ.h"
#include "librbd/Utils.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "common/dout.h"
#define dout_subsys ceph_subsys_rbd
image_ctx.config.template get_val<bool>("rbd_blacklist_on_break_lock"),
image_ctx.config.template get_val<uint64_t>("rbd_blacklist_expire_seconds")),
m_image_ctx(image_ctx) {
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
ML<I>::set_state_uninitialized();
}
template <typename I>
bool ExclusiveLock<I>::accept_requests(int *ret_val) const {
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
bool accept_requests = (!ML<I>::is_state_shutdown() &&
ML<I>::is_state_locked() &&
template <typename I>
bool ExclusiveLock<I>::accept_ops() const {
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
bool accept = accept_ops(ML<I>::m_lock);
ldout(m_image_ctx.cct, 20) << "=" << accept << dendl;
return accept;
}
template <typename I>
-bool ExclusiveLock<I>::accept_ops(const Mutex &lock) const {
+bool ExclusiveLock<I>::accept_ops(const ceph::mutex &lock) const {
return (!ML<I>::is_state_shutdown() &&
(ML<I>::is_state_locked() || ML<I>::is_state_post_acquiring()));
}
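The two-overload accept_ops() pattern above is the "lock as witness" idiom: the private overload takes the mutex by const reference solely to document that the caller already holds it, and never locks anything itself. A standalone sketch of the idiom (StateMachine and its flags are illustrative):

```cpp
#include <mutex>

class StateMachine {
 public:
  bool accept_ops() const {
    std::lock_guard locker{m_lock};
    return accept_ops(m_lock);  // pass the held lock as evidence
  }
 private:
  bool accept_ops(const std::mutex& /*held*/) const {
    return m_locked && !m_shutdown;  // relies on the caller's lock
  }
  mutable std::mutex m_lock;
  bool m_locked = false;
  bool m_shutdown = false;
};
```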
template <typename I>
void ExclusiveLock<I>::block_requests(int r) {
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
m_request_blocked_count++;
if (m_request_blocked_ret_val == 0) {
template <typename I>
void ExclusiveLock<I>::unblock_requests() {
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
ceph_assert(m_request_blocked_count > 0);
m_request_blocked_count--;
template <typename I>
void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ldout(m_image_ctx.cct, 10) << dendl;
{
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
ML<I>::set_state_initializing();
}
template <typename I>
void ExclusiveLock<I>::handle_peer_notification(int r) {
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
if (!ML<I>::is_state_waiting_for_lock()) {
return;
}
template <typename I>
Context *ExclusiveLock<I>::start_op(int* ret_val) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
- Mutex::Locker locker(ML<I>::m_lock);
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+ std::lock_guard locker{ML<I>::m_lock};
if (!accept_ops(ML<I>::m_lock)) {
*ret_val = get_unlocked_op_error();
ldout(m_image_ctx.cct, 10) << ": features=" << features << dendl;
{
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.clone_copy_on_read ||
(features & RBD_FEATURE_JOURNALING) != 0) {
m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, true);
}
}
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
ML<I>::set_state_unlocked();
}
ldout(m_image_ctx.cct, 10) << dendl;
{
- RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
+ std::unique_lock owner_locker{m_image_ctx.owner_lock};
m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
m_image_ctx.exclusive_lock = nullptr;
}
int acquire_lock_peer_ret_val = 0;
{
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val);
}
on_finish->complete(r);
return;
} else if (r < 0) {
- ML<I>::m_lock.Lock();
+ ML<I>::m_lock.lock();
ceph_assert(ML<I>::is_state_acquiring());
// PostAcquire state machine will not run, so we need complete prepare
// if lock is in-use by another client, request the lock
if (ML<I>::is_action_acquire_lock() && (r == -EBUSY || r == -EAGAIN)) {
ML<I>::set_state_waiting_for_lock();
- ML<I>::m_lock.Unlock();
+ ML<I>::m_lock.unlock();
// request the lock from a peer
m_image_ctx.image_watcher->notify_request_lock();
// inform manage lock that we have interrupted the state machine
r = -ECANCELED;
} else {
- ML<I>::m_lock.Unlock();
+ ML<I>::m_lock.unlock();
// clear error if peer owns lock
if (r == -EAGAIN) {
return;
}
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
m_pre_post_callback = on_finish;
using EL = ExclusiveLock<I>;
PostAcquireRequest<I> *req = PostAcquireRequest<I>::create(m_image_ctx,
void ExclusiveLock<I>::handle_post_acquiring_lock(int r) {
ldout(m_image_ctx.cct, 10) << dendl;
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
ceph_assert(r == 0);
Context *on_finish = nullptr;
{
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
ceph_assert(ML<I>::is_state_acquiring() || ML<I>::is_state_post_acquiring());
    ceph_assert(m_pre_post_callback != nullptr);
void ExclusiveLock<I>::pre_release_lock_handler(bool shutting_down,
Context *on_finish) {
ldout(m_image_ctx.cct, 10) << dendl;
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
PreReleaseRequest<I> *req = PreReleaseRequest<I>::create(
m_image_ctx, shutting_down, m_async_op_tracker, on_finish);
<< shutting_down << dendl;
if (!shutting_down) {
{
- Mutex::Locker locker(ML<I>::m_lock);
+ std::lock_guard locker{ML<I>::m_lock};
ceph_assert(ML<I>::is_state_pre_releasing() || ML<I>::is_state_releasing());
}
}
} else {
{
- RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
+ std::unique_lock owner_locker{m_image_ctx.owner_lock};
m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
m_image_ctx.exclusive_lock = nullptr;
}
int m_acquire_lock_peer_ret_val = 0;
- bool accept_ops(const Mutex &lock) const;
+ bool accept_ops(const ceph::mutex &lock) const;
void handle_init_complete(uint64_t features);
void handle_post_acquiring_lock(int r);
class SafeTimerSingleton : public SafeTimer {
public:
- Mutex lock;
+ ceph::mutex lock = ceph::make_mutex("librbd::Journal::SafeTimerSingleton::lock");
explicit SafeTimerSingleton(CephContext *cct)
- : SafeTimer(cct, lock, true),
- lock("librbd::Journal::SafeTimerSingleton::lock") {
+ : SafeTimer(cct, lock, true) {
init();
}
~SafeTimerSingleton() {
- Mutex::Locker locker(lock);
+ std::lock_guard locker{lock};
shutdown();
}
};
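One subtlety in SafeTimerSingleton: the SafeTimer base is constructed before the default member initializer of `lock` runs, so passing `lock` to the base constructor is only safe because that constructor merely binds the reference (the timer locks it later, from its started thread). A sketch of the ordering, with illustrative TimerBase/TimerSingleton names:

```cpp
#include <mutex>

struct TimerBase {
  explicit TimerBase(std::mutex& l) : lock_ref(l) {}  // binds, never locks
  std::mutex& lock_ref;
};

struct TimerSingleton : TimerBase {
  std::mutex lock;                       // initialized after TimerBase
  TimerSingleton() : TimerBase(lock) {}  // OK: reference is only stored
  ~TimerSingleton() {
    std::lock_guard locker{lock};        // mirrors the shutdown() guard
  }
};
```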
name(image_name),
image_watcher(NULL),
journal(NULL),
- owner_lock(util::unique_lock_name("librbd::ImageCtx::owner_lock", this)),
- image_lock(util::unique_lock_name("librbd::ImageCtx::image_lock", this)),
- timestamp_lock(util::unique_lock_name("librbd::ImageCtx::timestamp_lock", this)),
- async_ops_lock(util::unique_lock_name("librbd::ImageCtx::async_ops_lock", this)),
- copyup_list_lock(util::unique_lock_name("librbd::ImageCtx::copyup_list_lock", this)),
+ owner_lock(ceph::make_shared_mutex(
+ util::unique_lock_name("librbd::ImageCtx::owner_lock", this))),
+ image_lock(ceph::make_shared_mutex(
+ util::unique_lock_name("librbd::ImageCtx::image_lock", this))),
+ timestamp_lock(ceph::make_shared_mutex(
+ util::unique_lock_name("librbd::ImageCtx::timestamp_lock", this))),
+ async_ops_lock(ceph::make_mutex(
+ util::unique_lock_name("librbd::ImageCtx::async_ops_lock", this))),
+ copyup_list_lock(ceph::make_mutex(
+ util::unique_lock_name("librbd::ImageCtx::copyup_list_lock", this))),
extra_read_flags(0),
old_format(false),
order(0), size(0), features(0),
}
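The ImageCtx initializer list also shows the selection rule applied throughout the patch: RWLock members guarding read-mostly state become ceph::shared_mutex, while plain mutual-exclusion locks become ceph::mutex. Condensed sketch with standard types (ImageCtxLike is illustrative):

```cpp
#include <mutex>
#include <shared_mutex>

struct ImageCtxLike {
  std::shared_mutex owner_lock;          // many concurrent readers
  mutable std::shared_mutex image_lock;  // const getters take the read side
  std::shared_mutex timestamp_lock;
  std::mutex async_ops_lock;             // short exclusive sections only
  std::mutex copyup_list_lock;
};
```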
int ImageCtx::snap_set(uint64_t in_snap_id) {
- ceph_assert(image_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(image_lock));
auto it = snap_info.find(in_snap_id);
if (in_snap_id != CEPH_NOSNAP && it != snap_info.end()) {
snap_id = in_snap_id;
void ImageCtx::snap_unset()
{
- ceph_assert(image_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(image_lock));
snap_id = CEPH_NOSNAP;
snap_namespace = {};
snap_name = "";
snap_t ImageCtx::get_snap_id(const cls::rbd::SnapshotNamespace& in_snap_namespace,
const string& in_snap_name) const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
auto it = snap_ids.find({in_snap_namespace, in_snap_name});
if (it != snap_ids.end()) {
return it->second;
const SnapInfo* ImageCtx::get_snap_info(snap_t in_snap_id) const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
map<snap_t, SnapInfo>::const_iterator it =
snap_info.find(in_snap_id);
if (it != snap_info.end())
int ImageCtx::get_snap_name(snap_t in_snap_id,
string *out_snap_name) const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*out_snap_name = info->name;
int ImageCtx::get_snap_namespace(snap_t in_snap_id,
cls::rbd::SnapshotNamespace *out_snap_namespace) const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*out_snap_namespace = info->snap_namespace;
uint64_t ImageCtx::get_current_size() const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
return size;
}
void ImageCtx::set_access_timestamp(utime_t at)
{
- ceph_assert(timestamp_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(timestamp_lock));
access_timestamp = at;
}
void ImageCtx::set_modify_timestamp(utime_t mt)
{
- ceph_assert(timestamp_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(timestamp_lock));
modify_timestamp = mt;
}
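The assertion rewrites in this file swap method calls for macros: ceph_mutex_is_locked() and ceph_mutex_is_wlocked(). The distinction matters for shared mutexes (write lock vs. any lock), and, assuming the usual ceph_mutex.h build split, the macros expand to `true` in release builds so the asserts only fire under the debug lock types. Sketch of that release-build shape with an illustrative macro:

```cpp
#include <cassert>
#include <shared_mutex>

#define MUTEX_IS_WLOCKED(m) true  // illustrative release-build expansion

std::shared_mutex timestamp_lock;
long access_timestamp = 0;

void set_access_timestamp_unchecked(long at) {
  assert(MUTEX_IS_WLOCKED(timestamp_lock));  // documents the contract only
  access_timestamp = at;
}
```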
int ImageCtx::is_snap_protected(snap_t in_snap_id,
bool *is_protected) const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*is_protected =
int ImageCtx::is_snap_unprotected(snap_t in_snap_id,
bool *is_unprotected) const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*is_unprotected =
uint8_t protection_status, uint64_t flags,
utime_t timestamp)
{
- ceph_assert(image_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(image_lock));
snaps.push_back(id);
SnapInfo info(in_snap_name, in_snap_namespace,
in_size, parent, protection_status, flags, timestamp);
string in_snap_name,
snap_t id)
{
- ceph_assert(image_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(image_lock));
snaps.erase(std::remove(snaps.begin(), snaps.end(), id), snaps.end());
snap_info.erase(id);
snap_ids.erase({in_snap_namespace, in_snap_name});
uint64_t ImageCtx::get_image_size(snap_t in_snap_id) const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
if (in_snap_id == CEPH_NOSNAP) {
if (!resize_reqs.empty() &&
resize_reqs.front()->shrinking()) {
}
uint64_t ImageCtx::get_object_count(snap_t in_snap_id) const {
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
uint64_t image_size = get_image_size(in_snap_id);
return Striper::get_num_objects(layout, image_size);
}
bool ImageCtx::test_features(uint64_t features) const
{
- RWLock::RLocker l(image_lock);
+ std::shared_lock l{image_lock};
return test_features(features, image_lock);
}
bool ImageCtx::test_features(uint64_t in_features,
- const RWLock &in_image_lock) const
+ const ceph::shared_mutex &in_image_lock) const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
return ((features & in_features) == in_features);
}
bool ImageCtx::test_op_features(uint64_t in_op_features) const
{
- RWLock::RLocker image_locker(image_lock);
+ std::shared_lock l{image_lock};
return test_op_features(in_op_features, image_lock);
}
bool ImageCtx::test_op_features(uint64_t in_op_features,
- const RWLock &in_image_lock) const
+ const ceph::shared_mutex &in_image_lock) const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
return ((op_features & in_op_features) == in_op_features);
}
int ImageCtx::get_flags(librados::snap_t _snap_id, uint64_t *_flags) const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
if (_snap_id == CEPH_NOSNAP) {
*_flags = flags;
return 0;
int ImageCtx::test_flags(librados::snap_t in_snap_id,
uint64_t flags, bool *flags_set) const
{
- RWLock::RLocker l(image_lock);
+ std::shared_lock l{image_lock};
return test_flags(in_snap_id, flags, image_lock, flags_set);
}
int ImageCtx::test_flags(librados::snap_t in_snap_id,
- uint64_t flags, const RWLock &in_image_lock,
+ uint64_t flags,
+ const ceph::shared_mutex &in_image_lock,
bool *flags_set) const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
uint64_t snap_flags;
int r = get_flags(in_snap_id, &snap_flags);
if (r < 0) {
int ImageCtx::update_flags(snap_t in_snap_id, uint64_t flag, bool enabled)
{
- ceph_assert(image_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(image_lock));
uint64_t *_flags;
if (in_snap_id == CEPH_NOSNAP) {
_flags = &flags;
const ParentImageInfo* ImageCtx::get_parent_info(snap_t in_snap_id) const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
if (in_snap_id == CEPH_NOSNAP)
return &parent_md;
const SnapInfo *info = get_snap_info(in_snap_id);
int ImageCtx::get_parent_overlap(snap_t in_snap_id, uint64_t *overlap) const
{
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
const auto info = get_parent_info(in_snap_id);
if (info) {
*overlap = info->overlap;
void ImageCtx::cancel_async_requests(Context *on_finish) {
{
- Mutex::Locker async_ops_locker(async_ops_lock);
+ std::lock_guard async_ops_locker{async_ops_lock};
if (!async_requests.empty()) {
ldout(cct, 10) << "canceling async requests: count="
<< async_requests.size() << dendl;
void ImageCtx::set_image_name(const std::string &image_name) {
// update the name so rename can be invoked repeatedly
- RWLock::RLocker owner_locker(owner_lock);
- RWLock::WLocker image_locker(image_lock);
+ std::shared_lock owner_locker{owner_lock};
+ std::unique_lock image_locker{image_lock};
name = image_name;
if (old_format) {
header_oid = util::old_header_name(image_name);
}
exclusive_lock::Policy *ImageCtx::get_exclusive_lock_policy() const {
- ceph_assert(owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(owner_lock));
ceph_assert(exclusive_lock_policy != nullptr);
return exclusive_lock_policy;
}
void ImageCtx::set_exclusive_lock_policy(exclusive_lock::Policy *policy) {
- ceph_assert(owner_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(owner_lock));
ceph_assert(policy != nullptr);
delete exclusive_lock_policy;
exclusive_lock_policy = policy;
}
journal::Policy *ImageCtx::get_journal_policy() const {
- ceph_assert(image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_lock));
ceph_assert(journal_policy != nullptr);
return journal_policy;
}
void ImageCtx::set_journal_policy(journal::Policy *policy) {
- ceph_assert(image_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(image_lock));
ceph_assert(policy != nullptr);
delete journal_policy;
journal_policy = policy;
}
void ImageCtx::get_timer_instance(CephContext *cct, SafeTimer **timer,
- Mutex **timer_lock) {
+ ceph::mutex **timer_lock) {
auto safe_timer_singleton =
&cct->lookup_or_create_singleton_object<SafeTimerSingleton>(
"librbd::journal::safe_timer", false, cct);
#include <vector>
#include "common/allocator.h"
+#include "common/ceph_mutex.h"
#include "common/config_proxy.h"
#include "common/event_socket.h"
-#include "common/Mutex.h"
#include "common/Readahead.h"
-#include "common/RWLock.h"
#include "common/snap_types.h"
#include "common/zipkin_trace.h"
* owner_lock, image_lock
 *  async_ops_lock, timestamp_lock
*/
- RWLock owner_lock; // protects exclusive lock leadership updates
- RWLock image_lock; // protects snapshot-related member variables,
+ ceph::shared_mutex owner_lock; // protects exclusive lock leadership updates
+ mutable ceph::shared_mutex image_lock; // protects snapshot-related member variables,
// features (and associated helper classes), and flags
// protects access to the mutable image metadata that
// isn't guarded by other locks below, and blocks writes
// object_map
// parent_md and parent
- RWLock timestamp_lock; // protects (create/access/modify)_timestamp
- Mutex async_ops_lock; // protects async_ops and async_requests
- Mutex copyup_list_lock; // protects copyup_waiting_list
+ ceph::shared_mutex timestamp_lock; // protects (create/access/modify)_timestamp
+ ceph::mutex async_ops_lock; // protects async_ops and async_requests
+ ceph::mutex copyup_list_lock; // protects copyup_waiting_list
unsigned extra_read_flags;
uint64_t get_object_count(librados::snap_t in_snap_id) const;
bool test_features(uint64_t test_features) const;
bool test_features(uint64_t test_features,
- const RWLock &in_image_lock) const;
+ const ceph::shared_mutex &in_image_lock) const;
bool test_op_features(uint64_t op_features) const;
bool test_op_features(uint64_t op_features,
- const RWLock &in_image_lock) const;
+ const ceph::shared_mutex &in_image_lock) const;
int get_flags(librados::snap_t in_snap_id, uint64_t *flags) const;
int test_flags(librados::snap_t in_snap_id,
uint64_t test_flags, bool *flags_set) const;
int test_flags(librados::snap_t in_snap_id,
- uint64_t test_flags, const RWLock &in_image_lock,
+ uint64_t test_flags, const ceph::shared_mutex &in_image_lock,
bool *flags_set) const;
int update_flags(librados::snap_t in_snap_id, uint64_t flag, bool enabled);
ThreadPool **thread_pool,
ContextWQ **op_work_queue);
static void get_timer_instance(CephContext *cct, SafeTimer **timer,
- Mutex **timer_lock);
+ ceph::mutex **timer_lock);
};
}
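get_timer_instance() now hands callers a ceph::mutex* instead of a Mutex*; SafeTimer's API requires that mutex to be held around scheduling calls. A sketch of the caller side with standard types and illustrative names:

```cpp
#include <mutex>

struct TimerLike {};  // stands in for SafeTimer

void get_timer_instance(TimerLike** timer, std::mutex** timer_lock) {
  static TimerLike t;
  static std::mutex l;
  *timer = &t;
  *timer_lock = &l;
}

void schedule_event() {
  TimerLike* timer = nullptr;
  std::mutex* timer_lock = nullptr;
  get_timer_instance(&timer, &timer_lock);
  std::lock_guard locker{*timer_lock};  // timer calls must be made locked
  // timer->add_event_after(seconds, ctx);
}
```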
public:
explicit ImageUpdateWatchers(CephContext *cct) : m_cct(cct),
- m_lock(util::unique_lock_name("librbd::ImageUpdateWatchers::m_lock", this)) {
+ m_lock(ceph::make_mutex(
+ util::unique_lock_name("librbd::ImageUpdateWatchers::m_lock", this))) {
}
~ImageUpdateWatchers() {
void flush(Context *on_finish) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (!m_in_flight.empty()) {
Context *ctx = new FunctionContext(
[this, on_finish](int r) {
void shut_down(Context *on_finish) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_shut_down_finish == nullptr);
m_watchers.clear();
if (!m_in_flight.empty()) {
void register_watcher(UpdateWatchCtx *watcher, uint64_t *handle) {
ldout(m_cct, 20) << __func__ << ": watcher=" << watcher << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_shut_down_finish == nullptr);
create_work_queue();
<< handle << dendl;
int r = 0;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto it = m_watchers.find(handle);
if (it == m_watchers.end()) {
r = -ENOENT;
void notify() {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
for (auto it : m_watchers) {
send_notify(it.first, it.second);
}
}
void send_notify(uint64_t handle, UpdateWatchCtx *watcher) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": handle="
<< handle << ", watcher=" << watcher << dendl;
Context *on_shut_down_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto in_flight_it = m_in_flight.find(handle);
ceph_assert(in_flight_it != m_in_flight.end());
};
CephContext *m_cct;
- Mutex m_lock;
+ ceph::mutex m_lock;
ContextWQ *m_work_queue = nullptr;
std::map<uint64_t, UpdateWatchCtx*> m_watchers;
uint64_t m_next_handle = 0;
template <typename I>
ImageState<I>::ImageState(I *image_ctx)
: m_image_ctx(image_ctx), m_state(STATE_UNINITIALIZED),
- m_lock(util::unique_lock_name("librbd::ImageState::m_lock", this)),
+ m_lock(ceph::make_mutex(
+ util::unique_lock_name("librbd::ImageState::m_lock", this))),
m_last_refresh(0), m_refresh_seq(0),
m_update_watchers(new ImageUpdateWatchers(image_ctx->cct)) {
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
- m_lock.Lock();
+ m_lock.lock();
ceph_assert(m_state == STATE_UNINITIALIZED);
m_open_flags = flags;
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
- m_lock.Lock();
+ m_lock.lock();
ceph_assert(!is_closed());
Action action(ACTION_TYPE_CLOSE);
template <typename I>
void ImageState<I>::handle_update_notification() {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
++m_refresh_seq;
CephContext *cct = m_image_ctx->cct;
template <typename I>
bool ImageState<I>::is_refresh_required() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return (m_last_refresh != m_refresh_seq || find_pending_refresh() != nullptr);
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
- m_lock.Lock();
+ m_lock.lock();
if (is_closed()) {
- m_lock.Unlock();
+ m_lock.unlock();
on_finish->complete(-ESHUTDOWN);
return;
}
int ImageState<I>::refresh_if_required() {
C_SaferCond ctx;
{
- m_lock.Lock();
+ m_lock.lock();
Action action(ACTION_TYPE_REFRESH);
action.refresh_seq = m_refresh_seq;
// if a refresh is in-flight, delay until it is finished
action = *refresh_action;
} else if (m_last_refresh == m_refresh_seq) {
- m_lock.Unlock();
+ m_lock.unlock();
return 0;
} else if (is_closed()) {
- m_lock.Unlock();
+ m_lock.unlock();
return -ESHUTDOWN;
}
template <typename I>
const typename ImageState<I>::Action *
ImageState<I>::find_pending_refresh() const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
auto it = std::find_if(m_actions_contexts.rbegin(),
m_actions_contexts.rend(),
Action action(ACTION_TYPE_SET_SNAP);
action.snap_id = snap_id;
- m_lock.Lock();
+ m_lock.lock();
execute_action_unlock(action, on_finish);
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << dendl;
- m_lock.Lock();
+ m_lock.lock();
if (is_closed()) {
- m_lock.Unlock();
+ m_lock.unlock();
on_ready->complete(-ESHUTDOWN);
return;
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << dendl;
- m_lock.Lock();
+ m_lock.lock();
if (m_state != STATE_PREPARING_LOCK) {
- m_lock.Unlock();
+ m_lock.unlock();
return;
}
template <typename I>
bool ImageState<I>::is_closed() const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
return ((m_state == STATE_CLOSED) ||
(!m_actions_contexts.empty() &&
template <typename I>
void ImageState<I>::append_context(const Action &action, Context *context) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ActionContexts *action_contexts = nullptr;
for (auto &action_ctxs : m_actions_contexts) {
template <typename I>
void ImageState<I>::execute_next_action_unlock() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
switch (m_actions_contexts.front().first.action_type) {
case ACTION_TYPE_OPEN:
template <typename I>
void ImageState<I>::execute_action_unlock(const Action &action,
Context *on_finish) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
append_context(action, on_finish);
if (!is_transition_state()) {
execute_next_action_unlock();
} else {
- m_lock.Unlock();
+ m_lock.unlock();
}
}
template <typename I>
void ImageState<I>::complete_action_unlock(State next_state, int r) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
ActionContexts action_contexts(std::move(m_actions_contexts.front()));
m_actions_contexts.pop_front();
m_state = next_state;
- m_lock.Unlock();
+ m_lock.unlock();
for (auto ctx : action_contexts.second) {
ctx->complete(r);
}
if (next_state != STATE_UNINITIALIZED && next_state != STATE_CLOSED) {
- m_lock.Lock();
+ m_lock.lock();
if (!is_transition_state() && !m_actions_contexts.empty()) {
execute_next_action_unlock();
} else {
- m_lock.Unlock();
+ m_lock.unlock();
}
}
}
template <typename I>
void ImageState<I>::send_open_unlock() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
image::OpenRequest<I> *req = image::OpenRequest<I>::create(
m_image_ctx, m_open_flags, ctx);
- m_lock.Unlock();
+ m_lock.unlock();
req->send();
}
lderr(cct) << "failed to open image: " << cpp_strerror(r) << dendl;
}
- m_lock.Lock();
+ m_lock.lock();
complete_action_unlock(r < 0 ? STATE_UNINITIALIZED : STATE_OPEN, r);
}
template <typename I>
void ImageState<I>::send_close_unlock() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
image::CloseRequest<I> *req = image::CloseRequest<I>::create(
m_image_ctx, ctx);
- m_lock.Unlock();
+ m_lock.unlock();
req->send();
}
<< dendl;
}
- m_lock.Lock();
+ m_lock.lock();
complete_action_unlock(STATE_CLOSED, r);
}
template <typename I>
void ImageState<I>::send_refresh_unlock() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
image::RefreshRequest<I> *req = image::RefreshRequest<I>::create(
*m_image_ctx, false, false, ctx);
- m_lock.Unlock();
+ m_lock.unlock();
req->send();
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
- m_lock.Lock();
+ m_lock.lock();
ceph_assert(!m_actions_contexts.empty());
ActionContexts &action_contexts(m_actions_contexts.front());
template <typename I>
void ImageState<I>::send_set_snap_unlock() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
m_state = STATE_SETTING_SNAP;
image::SetSnapRequest<I> *req = image::SetSnapRequest<I>::create(
*m_image_ctx, action_contexts.first.snap_id, ctx);
- m_lock.Unlock();
+ m_lock.unlock();
req->send();
}
lderr(cct) << "failed to set snapshot: " << cpp_strerror(r) << dendl;
}
- m_lock.Lock();
+ m_lock.lock();
complete_action_unlock(STATE_OPEN, r);
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
m_state = STATE_PREPARING_LOCK;
ceph_assert(!m_actions_contexts.empty());
ceph_assert(action_contexts.first.action_type == ACTION_TYPE_LOCK);
Context *on_ready = action_contexts.first.on_ready;
- m_lock.Unlock();
+ m_lock.unlock();
if (on_ready == nullptr) {
complete_action_unlock(STATE_OPEN, 0);
#define CEPH_LIBRBD_IMAGE_STATE_H
#include "include/int_types.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include <list>
#include <string>
#include <utility>
ImageCtxT *m_image_ctx;
State m_state;
- mutable Mutex m_lock;
+ mutable ceph::mutex m_lock;
ActionsContexts m_actions_contexts;
uint64_t m_last_refresh;
: Watcher(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid),
m_image_ctx(image_ctx),
m_task_finisher(new TaskFinisher<Task>(*m_image_ctx.cct)),
- m_async_request_lock(util::unique_lock_name("librbd::ImageWatcher::m_async_request_lock", this)),
- m_owner_client_id_lock(util::unique_lock_name("librbd::ImageWatcher::m_owner_client_id_lock", this))
+ m_async_request_lock(ceph::make_shared_mutex(
+ util::unique_lock_name("librbd::ImageWatcher::m_async_request_lock", this))),
+ m_owner_client_id_lock(ceph::make_mutex(
+ util::unique_lock_name("librbd::ImageWatcher::m_owner_client_id_lock", this)))
{
}
schedule_async_complete(request, r);
}
} else {
- RWLock::WLocker async_request_locker(m_async_request_lock);
+ std::unique_lock async_request_locker{m_async_request_lock};
m_async_pending.erase(request);
}
}
void ImageWatcher<I>::notify_flatten(uint64_t request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
bool allow_shrink,
ProgressContext &prog_ctx,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
void ImageWatcher<I>::notify_snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
void ImageWatcher<I>::notify_snap_rename(const snapid_t &src_snap_id,
const std::string &dst_snap_name,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
void ImageWatcher<I>::notify_snap_remove(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
void ImageWatcher<I>::notify_snap_protect(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
void ImageWatcher<I>::notify_snap_unprotect(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
void ImageWatcher<I>::notify_rebuild_object_map(uint64_t request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
template <typename I>
void ImageWatcher<I>::notify_rename(const std::string &image_name,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
template <typename I>
void ImageWatcher<I>::notify_update_features(uint64_t features, bool enabled,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
void ImageWatcher<I>::notify_migrate(uint64_t request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
void ImageWatcher<I>::notify_sparsify(uint64_t request_id, size_t sparse_size,
ProgressContext &prog_ctx,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
template <typename I>
void ImageWatcher<I>::cancel_async_requests() {
- RWLock::WLocker l(m_async_request_lock);
+ std::unique_lock l{m_async_request_lock};
for (std::map<AsyncRequestId, AsyncRequest>::iterator iter =
m_async_requests.begin();
iter != m_async_requests.end(); ++iter) {
template <typename I>
void ImageWatcher<I>::set_owner_client_id(const ClientId& client_id) {
- ceph_assert(m_owner_client_id_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_owner_client_id_lock));
m_owner_client_id = client_id;
ldout(m_image_ctx.cct, 10) << this << " current lock owner: "
<< m_owner_client_id << dendl;
template <typename I>
ClientId ImageWatcher<I>::get_client_id() {
- RWLock::RLocker l(this->m_watch_lock);
+ std::shared_lock l{this->m_watch_lock};
return ClientId(m_image_ctx.md_ctx.get_instance_id(), this->m_watch_handle);
}
ClientId client_id = get_client_id();
{
- Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
+ std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
set_owner_client_id(client_id);
}
ldout(m_image_ctx.cct, 10) << this << " notify released lock" << dendl;
{
- Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
+ std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
set_owner_client_id(ClientId());
}
template <typename I>
void ImageWatcher<I>::schedule_request_lock(bool use_timer, int timer_delay) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
if (m_image_ctx.exclusive_lock == nullptr) {
// exclusive lock dynamically disabled via image refresh
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
- RWLock::RLocker watch_locker(this->m_watch_lock);
+ std::shared_lock watch_locker{this->m_watch_lock};
if (this->is_registered(this->m_watch_lock)) {
ldout(m_image_ctx.cct, 15) << this << " requesting exclusive lock" << dendl;
template <typename I>
void ImageWatcher<I>::notify_request_lock() {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
+ std::shared_lock image_locker{m_image_ctx.image_lock};
// ExclusiveLock state machine can be dynamically disabled or
// race with task cancel
template <typename I>
void ImageWatcher<I>::handle_request_lock(int r) {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
+ std::shared_lock image_locker{m_image_ctx.image_lock};
// ExclusiveLock state machine cannot transition -- but can be
// dynamically disabled
void ImageWatcher<I>::notify_lock_owner(const Payload& payload,
Context *on_finish) {
ceph_assert(on_finish != nullptr);
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
bufferlist bl;
encode(NotifyMessage(payload), bl);
template <typename I>
Context *ImageWatcher<I>::remove_async_request(const AsyncRequestId &id) {
- RWLock::WLocker async_request_locker(m_async_request_lock);
+ std::unique_lock async_request_locker{m_async_request_lock};
auto it = m_async_requests.find(id);
if (it != m_async_requests.end()) {
Context *on_complete = it->second.first;
ProgressContext& prog_ctx,
Context *on_finish) {
ceph_assert(on_finish != nullptr);
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ldout(m_image_ctx.cct, 10) << this << " async request: " << async_request_id
<< dendl;
});
{
- RWLock::WLocker async_request_locker(m_async_request_lock);
+ std::unique_lock async_request_locker{m_async_request_lock};
m_async_requests[async_request_id] = AsyncRequest(on_complete, &prog_ctx);
}
if (async_request_id.client_id == get_client_id()) {
return -ERESTART;
} else {
- RWLock::WLocker l(m_async_request_lock);
+ std::unique_lock l{m_async_request_lock};
if (m_async_pending.count(async_request_id) == 0) {
m_async_pending.insert(async_request_id);
*new_request = true;
bool cancel_async_requests = true;
if (payload.client_id.is_valid()) {
- Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
+ std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
if (payload.client_id == m_owner_client_id) {
cancel_async_requests = false;
}
set_owner_client_id(payload.client_id);
}
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
// potentially wake up the exclusive lock state machine now that
// a lock owner has advertised itself
bool cancel_async_requests = true;
if (payload.client_id.is_valid()) {
- Mutex::Locker l(m_owner_client_id_lock);
+ std::lock_guard l{m_owner_client_id_lock};
if (payload.client_id != m_owner_client_id) {
ldout(m_image_ctx.cct, 10) << this << " unexpected owner: "
<< payload.client_id << " != "
}
}
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (cancel_async_requests &&
(m_image_ctx.exclusive_lock == nullptr ||
!m_image_ctx.exclusive_lock->is_lock_owner())) {
return true;
}
- RWLock::RLocker l(m_image_ctx.owner_lock);
+ std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr &&
m_image_ctx.exclusive_lock->is_lock_owner()) {
int r = 0;
if (accept_request) {
ceph_assert(r == 0);
- Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
+ std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
if (!m_owner_client_id.is_valid()) {
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const AsyncProgressPayload &payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker l(m_async_request_lock);
+ std::shared_lock l{m_async_request_lock};
std::map<AsyncRequestId, AsyncRequest>::iterator req_it =
m_async_requests.find(payload.async_request_id);
if (req_it != m_async_requests.end()) {
bool ImageWatcher<I>::handle_payload(const FlattenPayload &payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker l(m_image_ctx.owner_lock);
+ std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
template <typename I>
bool ImageWatcher<I>::handle_payload(const ResizePayload &payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker l(m_image_ctx.owner_lock);
+ std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapCreatePayload &payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker l(m_image_ctx.owner_lock);
+ std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapRenamePayload &payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker l(m_image_ctx.owner_lock);
+ std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapRemovePayload &payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker l(m_image_ctx.owner_lock);
+ std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapProtectPayload& payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapUnprotectPayload& payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
template <typename I>
bool ImageWatcher<I>::handle_payload(const RebuildObjectMapPayload& payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker l(m_image_ctx.owner_lock);
+ std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
template <typename I>
bool ImageWatcher<I>::handle_payload(const RenamePayload& payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
template <typename I>
bool ImageWatcher<I>::handle_payload(const UpdateFeaturesPayload& payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
bool ImageWatcher<I>::handle_payload(const MigratePayload &payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker l(m_image_ctx.owner_lock);
+ std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
template <typename I>
bool ImageWatcher<I>::handle_payload(const SparsifyPayload &payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker l(m_image_ctx.owner_lock);
+ std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
template <typename I>
bool ImageWatcher<I>::handle_payload(const UnknownPayload &payload,
C_NotifyAck *ack_ctx) {
- RWLock::RLocker l(m_image_ctx.owner_lock);
+ std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_requests(&r) || r < 0) {
<< cpp_strerror(err) << dendl;
{
- Mutex::Locker l(m_owner_client_id_lock);
+ std::lock_guard l{m_owner_client_id_lock};
set_owner_client_id(ClientId());
}
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
{
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
// update the lock cookie with the new watch handle
m_image_ctx.exclusive_lock->reacquire_lock(nullptr);
#define CEPH_LIBRBD_IMAGE_WATCHER_H
#include "cls/rbd/cls_rbd_types.h"
-#include "common/Mutex.h"
-#include "common/RWLock.h"
+#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "include/rbd/librbd.hpp"
#include "librbd/Watcher.h"
TaskFinisher<Task> *m_task_finisher;
- RWLock m_async_request_lock;
+ ceph::shared_mutex m_async_request_lock;
std::map<watch_notify::AsyncRequestId, AsyncRequest> m_async_requests;
std::set<watch_notify::AsyncRequestId> m_async_pending;
- Mutex m_owner_client_id_lock;
+ ceph::mutex m_owner_client_id_lock;
watch_notify::ClientId m_owner_client_id;
void handle_register_watch(int r);
journal::TagData *tag_data;
Context *on_finish;
- Mutex lock;
+ ceph::mutex lock = ceph::make_mutex("lock");
GetTagsRequest(CephContext *cct, J *journaler, cls::journal::Client *client,
journal::ImageClientMeta *client_meta, uint64_t *tag_tid,
journal::TagData *tag_data, Context *on_finish)
: cct(cct), journaler(journaler), client(client), client_meta(client_meta),
- tag_tid(tag_tid), tag_data(tag_data), on_finish(on_finish), lock("lock") {
+ tag_tid(tag_tid), tag_data(tag_data), on_finish(on_finish) {
}
/**
template <typename I>
Journal<I>::Journal(I &image_ctx)
: m_image_ctx(image_ctx), m_journaler(NULL),
- m_lock("Journal<I>::m_lock"), m_state(STATE_UNINITIALIZED),
+ m_state(STATE_UNINITIALIZED),
m_error_result(0), m_replay_handler(this), m_close_pending(false),
- m_event_lock("Journal<I>::m_event_lock"), m_event_tid(0),
+ m_event_tid(0),
m_blocking_writes(false), m_journal_replay(NULL),
m_metadata_listener(this) {
template <typename I>
bool Journal<I>::is_journal_supported(I &image_ctx) {
- ceph_assert(image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
return ((image_ctx.features & RBD_FEATURE_JOURNALING) &&
!image_ctx.read_only && image_ctx.snap_id == CEPH_NOSNAP);
}
Journaler journaler(image_ctx->md_ctx, image_ctx->id, IMAGE_CLIENT_ID, {},
nullptr);
- Mutex lock("lock");
+ ceph::mutex lock = ceph::make_mutex("lock");
journal::ImageClientMeta client_meta;
uint64_t tag_tid;
journal::TagData tag_data;
template <typename I>
bool Journal<I>::is_journal_ready() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return (m_state == STATE_READY);
}
template <typename I>
bool Journal<I>::is_journal_replaying() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return is_journal_replaying(m_lock);
}
template <typename I>
-bool Journal<I>::is_journal_replaying(const Mutex &) const {
- ceph_assert(m_lock.is_locked());
+bool Journal<I>::is_journal_replaying(const ceph::mutex &) const {
+ ceph_assert(ceph_mutex_is_locked(m_lock));
return (m_state == STATE_REPLAYING ||
m_state == STATE_FLUSHING_REPLAY ||
m_state == STATE_FLUSHING_RESTART ||
template <typename I>
bool Journal<I>::is_journal_appending() const {
- ceph_assert(m_image_ctx.image_lock.is_locked());
- Mutex::Locker locker(m_lock);
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
+ std::lock_guard locker{m_lock};
return (m_state == STATE_READY &&
!m_image_ctx.get_journal_policy()->append_disabled());
}
void Journal<I>::wait_for_journal_ready(Context *on_ready) {
on_ready = create_async_context_callback(m_image_ctx, on_ready);
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_state == STATE_READY) {
on_ready->complete(m_error_result);
} else {
m_image_ctx.io_object_dispatcher->register_object_dispatch(
journal::ObjectDispatch<I>::create(&m_image_ctx, this));
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_UNINITIALIZED);
wait_for_steady_state(on_finish);
create_journaler();
});
on_finish = create_async_context_callback(m_image_ctx, on_finish);
- Mutex::Locker locker(m_lock);
- while (m_listener_notify) {
- m_listener_cond.Wait(m_lock);
- }
+ std::unique_lock locker{m_lock};
+ m_listener_cond.wait(locker, [this] { return !m_listener_notify; });
Listeners listeners(m_listeners);
m_listener_notify = true;
- m_lock.Unlock();
+ locker.unlock();
for (auto listener : listeners) {
listener->handle_close();
}
- m_lock.Lock();
+ locker.lock();
m_listener_notify = false;
- m_listener_cond.Signal();
+ m_listener_cond.notify_all();
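This is the one hunk that changes a condition-variable protocol rather than just a type: the hand-rolled `while (...) { cond.Wait(lock); }` becomes the predicate overload of wait() on a std::unique_lock, Signal() becomes notify_all(), and the unlock/relock around the callbacks goes through the `locker` guard so its ownership state stays consistent. Standalone sketch of the full handshake with standard types and an illustrative free-function framing:

```cpp
#include <condition_variable>
#include <mutex>

std::mutex m_lock;
std::condition_variable m_listener_cond;
bool m_listener_notify = false;

void notify_listeners_closed() {
  std::unique_lock locker{m_lock};
  // Predicate overload replaces the explicit wait loop.
  m_listener_cond.wait(locker, [] { return !m_listener_notify; });

  m_listener_notify = true;
  locker.unlock();  // run listener callbacks without holding the lock
  // for (auto* l : listeners) l->handle_close();
  locker.lock();
  m_listener_notify = false;
  m_listener_cond.notify_all();
}
```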
ceph_assert(m_state != STATE_UNINITIALIZED);
if (m_state == STATE_CLOSED) {
template <typename I>
bool Journal<I>::is_tag_owner() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return is_tag_owner(m_lock);
}
template <typename I>
-bool Journal<I>::is_tag_owner(const Mutex &) const {
- ceph_assert(m_lock.is_locked());
+bool Journal<I>::is_tag_owner(const ceph::mutex &) const {
+ ceph_assert(ceph_mutex_is_locked(m_lock));
return (m_tag_data.mirror_uuid == LOCAL_MIRROR_UUID);
}
template <typename I>
uint64_t Journal<I>::get_tag_tid() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return m_tag_tid;
}
template <typename I>
journal::TagData Journal<I>::get_tag_data() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return m_tag_data;
}
journal::TagPredecessor predecessor;
predecessor.mirror_uuid = LOCAL_MIRROR_UUID;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_journaler != nullptr && is_tag_owner(m_lock));
cls::journal::Client client;
ldout(cct, 20) << this << " " << __func__ << ": mirror_uuid=" << mirror_uuid
<< dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_journaler != nullptr);
journal::TagData tag_data;
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_journaler != nullptr);
m_journaler->flush_commit_position(on_finish);
}
void Journal<I>::user_flushed() {
if (m_state == STATE_READY && !m_user_flushed.exchange(true) &&
m_image_ctx.config.template get_val<bool>("rbd_journal_object_writethrough_until_flush")) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_state == STATE_READY) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
uint64_t tid;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
tid = ++m_event_tid;
}
{
- Mutex::Locker event_locker(m_event_lock);
+ std::lock_guard event_locker{m_event_lock};
m_events[tid] = Event(futures, offset, length, filter_ret_val);
}
ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", "
"r=" << r << dendl;
- Mutex::Locker event_locker(m_event_lock);
+ std::lock_guard event_locker{m_event_lock};
typename Events::iterator it = m_events.find(tid);
if (it == m_events.end()) {
return;
<< "length=" << length << ", "
<< "r=" << r << dendl;
- Mutex::Locker event_locker(m_event_lock);
+ std::lock_guard event_locker{m_event_lock};
typename Events::iterator it = m_events.find(tid);
if (it == m_events.end()) {
return;
void Journal<I>::append_op_event(uint64_t op_tid,
journal::EventEntry &&event_entry,
Context *on_safe) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
bufferlist bl;
event_entry.timestamp = ceph_clock_now();
Future future;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
future = m_journaler->append(m_tag_tid, bl);
Future op_start_future;
Future op_finish_future;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
// ready to commit op event
ldout(cct, 10) << this << " " << __func__ << ": op_tid=" << op_tid << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_journal_replay != nullptr);
m_journal_replay->replay_op_ready(op_tid, on_resume);
}
Future future;
{
- Mutex::Locker event_locker(m_event_lock);
+ std::lock_guard event_locker{m_event_lock};
future = wait_event(m_lock, tid, on_safe);
}
ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", "
<< "on_safe=" << on_safe << dendl;
- Mutex::Locker event_locker(m_event_lock);
+ std::lock_guard event_locker{m_event_lock};
wait_event(m_lock, tid, on_safe);
}
template <typename I>
-typename Journal<I>::Future Journal<I>::wait_event(Mutex &lock, uint64_t tid,
+typename Journal<I>::Future Journal<I>::wait_event(ceph::mutex &lock, uint64_t tid,
Context *on_safe) {
- ceph_assert(m_event_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_event_lock));
CephContext *cct = m_image_ctx.cct;
typename Events::iterator it = m_events.find(tid);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
ceph_assert(m_journal_replay == nullptr);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
ceph_assert(m_journal_replay == nullptr);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_journal_replay != nullptr);
ceph_assert(m_state == STATE_REPLAYING);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_RESTARTING_REPLAY);
ceph_assert(m_journaler == NULL);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
delete m_journal_replay;
m_journal_replay = NULL;
Journal<I>, &Journal<I>::handle_journal_destroyed>(this));
ctx = new FunctionContext(
[this, ctx](int r) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_journaler->shut_down(ctx);
});
m_async_journal_op_tracker.wait(m_image_ctx, ctx);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_FLUSHING_REPLAY);
template <typename I>
void Journal<I>::complete_event(typename Events::iterator it, int r) {
- ceph_assert(m_event_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_event_lock));
ceph_assert(m_state == STATE_READY);
CephContext *cct = m_image_ctx.cct;
template <typename I>
void Journal<I>::start_append() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
m_journaler->start_append(
m_image_ctx.config.template get_val<uint64_t>("rbd_journal_object_max_in_flight_appends"));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_INITIALIZING);
if (r < 0) {
CephContext *cct = m_image_ctx.cct;
ReplayEntry replay_entry;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_state != STATE_REPLAYING) {
return;
}
bool cancel_ops = false;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_state != STATE_REPLAYING) {
return;
}
State state;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_FLUSHING_REPLAY);
state = m_state;
ceph_assert(r == 0);
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_processing_entry);
m_processing_entry = false;
}
void Journal<I>::handle_replay_process_safe(ReplayEntry replay_entry, int r) {
CephContext *cct = m_image_ctx.cct;
- m_lock.Lock();
+ std::unique_lock locker{m_lock};
ceph_assert(m_state == STATE_REPLAYING ||
m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_FLUSHING_REPLAY);
if (m_state == STATE_REPLAYING) {
// abort the replay if we have an error
transition_state(STATE_FLUSHING_RESTART, r);
- m_lock.Unlock();
+ locker.unlock();
// stop replay, shut down, and restart
Context* ctx = create_context_callback<
ldout(cct, 20) << this << " handle_replay_process_safe: "
<< "shut down replay" << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_FLUSHING_RESTART);
}
} else if (m_state == STATE_FLUSHING_REPLAY) {
// end-of-replay flush in-progress -- we need to restart replay
transition_state(STATE_FLUSHING_RESTART, r);
- m_lock.Unlock();
return;
}
} else {
// only commit the entry if written successfully
m_journaler->committed(replay_entry);
}
- m_lock.Unlock();
}
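handle_replay_process_safe shows why the conversion prefers std::unique_lock over bare Lock()/Unlock() pairs: every early return now releases the mutex through the guard's destructor, so the trailing m_lock.Unlock() calls are simply deleted, and the one deliberate unlock window goes through the guard so ownership stays balanced. The shape, with illustrative member names:

    void handle_event(int r) {
      std::unique_lock locker{m_lock};
      if (m_state == STATE_ABORTED) {
        return;          // destructor unlocks; no manual Unlock() needed
      }
      locker.unlock();   // deliberately drop the lock around the callback
      m_on_event->complete(r);
      locker.lock();     // reacquire before touching shared state again
      m_pending = false;
    }                    // destructor unlocks once more here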
template <typename I>
void Journal<I>::handle_flushing_restart(int r) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
template <typename I>
void Journal<I>::handle_flushing_replay() {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_STOPPING);
destroy_journaler(r);
<< dendl;
}
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
delete m_journaler;
m_journaler = nullptr;
Contexts on_safe_contexts;
{
- Mutex::Locker event_locker(m_event_lock);
+ std::lock_guard event_locker{m_event_lock};
typename Events::iterator it = m_events.find(tid);
ceph_assert(it != m_events.end());
template <typename I>
void Journal<I>::stop_recording() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_journaler != NULL);
ceph_assert(m_state == STATE_READY);
void Journal<I>::transition_state(State state, int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": new state=" << state << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
m_state = state;
if (m_error_result == 0 && r < 0) {
template <typename I>
bool Journal<I>::is_steady_state() const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
switch (m_state) {
case STATE_READY:
case STATE_CLOSED:
template <typename I>
void Journal<I>::wait_for_steady_state(Context *on_state) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!is_steady_state());
CephContext *cct = m_image_ctx.cct;
template <typename I>
int Journal<I>::is_resync_requested(bool *do_resync) {
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
return check_resync_requested(do_resync);
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(do_resync != nullptr);
cls::journal::Client client;
util::AsyncOpTracker &async_op_tracker;
Context *on_finish = nullptr;
- Mutex lock;
+ ceph::mutex lock =
+ ceph::make_mutex("librbd::Journal::C_RefreshTags::lock");
uint64_t tag_tid = 0;
journal::TagData tag_data;
explicit C_RefreshTags(util::AsyncOpTracker &async_op_tracker)
- : async_op_tracker(async_op_tracker),
- lock("librbd::Journal::C_RefreshTags::lock") {
+ : async_op_tracker(async_op_tracker) {
async_op_tracker.start_op();
}
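As with the Journal members, ceph::make_mutex() accepts the lockdep name that the old Mutex(name) constructor took, so the name moves from the constructor's initializer list into a default member initializer, and constructors whose only job was to name a lock shrink accordingly. Sketch (the type here is invented for illustration):

    #include "common/ceph_mutex.h"

    struct Example {
      // the lockdep name now lives at the point of declaration
      ceph::mutex lock = ceph::make_mutex("librbd::Example::lock");
      // no constructor is needed just to pass the lock name any more
    };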
~C_RefreshTags() override {
template <typename I>
void Journal<I>::handle_metadata_updated() {
CephContext *cct = m_image_ctx.cct;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_state != STATE_READY && !is_journal_replaying(m_lock)) {
return;
uint64_t tag_tid,
journal::TagData tag_data, int r) {
CephContext *cct = m_image_ctx.cct;
- Mutex::Locker locker(m_lock);
+ std::unique_lock locker{m_lock};
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": failed to refresh metadata: "
<< "refresh_sequence=" << refresh_sequence << ", "
<< "tag_tid=" << tag_tid << ", "
<< "tag_data=" << tag_data << dendl;
- while (m_listener_notify) {
- m_listener_cond.Wait(m_lock);
- }
+ m_listener_cond.wait(locker, [this] { return !m_listener_notify; });
bool was_tag_owner = is_tag_owner(m_lock);
if (m_tag_tid < tag_tid) {
Listeners listeners(m_listeners);
m_listener_notify = true;
- m_lock.Unlock();
+  locker.unlock();
if (promoted_to_primary) {
for (auto listener : listeners) {
}
}
- m_lock.Lock();
+  locker.lock();
m_listener_notify = false;
- m_listener_cond.Signal();
+ m_listener_cond.notify_all();
}
template <typename I>
void Journal<I>::add_listener(journal::Listener *listener) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_listeners.insert(listener);
}
template <typename I>
void Journal<I>::remove_listener(journal::Listener *listener) {
- Mutex::Locker locker(m_lock);
- while (m_listener_notify) {
- m_listener_cond.Wait(m_lock);
- }
+ std::unique_lock locker{m_lock};
+ m_listener_cond.wait(locker, [this] { return !m_listener_notify; });
m_listeners.erase(listener);
}
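add_listener/remove_listener illustrate the rule of thumb applied throughout the series: std::lock_guard for a plain critical section, std::unique_lock whenever the scope must also wait() on a condition variable (wait() has to be able to unlock and relock) or drop the lock mid-scope. Distilled, with hypothetical members:

    void add(Listener *listener) {
      std::lock_guard locker{m_lock};    // plain critical section
      m_listeners.insert(listener);
    }

    void remove(Listener *listener) {
      std::unique_lock locker{m_lock};   // wait() requires a unique_lock
      m_cond.wait(locker, [this] { return !m_notify_in_progress; });
      m_listeners.erase(listener);
    }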
#include "include/interval_set.h"
#include "include/rados/librados_fwd.hpp"
#include "common/Cond.h"
-#include "common/Mutex.h"
-#include "common/Cond.h"
#include "common/WorkQueue.h"
#include "journal/Future.h"
#include "journal/JournalMetadataListener.h"
ContextWQ *m_work_queue = nullptr;
SafeTimer *m_timer = nullptr;
- Mutex *m_timer_lock = nullptr;
+ ceph::mutex *m_timer_lock = nullptr;
Journaler *m_journaler;
- mutable Mutex m_lock;
+ mutable ceph::mutex m_lock = ceph::make_mutex("Journal<I>::m_lock");
State m_state;
uint64_t m_max_append_size = 0;
uint64_t m_tag_class = 0;
ReplayHandler m_replay_handler;
bool m_close_pending;
- Mutex m_event_lock;
+ ceph::mutex m_event_lock = ceph::make_mutex("Journal<I>::m_event_lock");
uint64_t m_event_tid;
Events m_events;
typedef std::set<journal::Listener *> Listeners;
Listeners m_listeners;
- Cond m_listener_cond;
+ ceph::condition_variable m_listener_cond;
bool m_listener_notify = false;
uint64_t m_refresh_sequence = 0;
- bool is_journal_replaying(const Mutex &) const;
- bool is_tag_owner(const Mutex &) const;
+ bool is_journal_replaying(const ceph::mutex &) const;
+ bool is_tag_owner(const ceph::mutex &) const;
uint64_t append_io_events(journal::EventType event_type,
const Bufferlists &bufferlists,
uint64_t offset, size_t length, bool flush_entry,
int filter_ret_val);
- Future wait_event(Mutex &lock, uint64_t tid, Context *on_safe);
+ Future wait_event(ceph::mutex &lock, uint64_t tid, Context *on_safe);
void create_journaler();
void destroy_journaler(int r);
const string& oid, Watcher *watcher, Mode mode,
bool blacklist_on_break_lock,
uint32_t blacklist_expire_seconds)
- : m_lock(unique_lock_name("librbd::ManagedLock<I>::m_lock", this)),
+  : m_lock(ceph::make_mutex(
+      unique_lock_name("librbd::ManagedLock<I>::m_lock", this))),
m_ioctx(ioctx), m_cct(reinterpret_cast<CephContext *>(ioctx.cct())),
m_work_queue(work_queue),
m_oid(oid),
template <typename I>
ManagedLock<I>::~ManagedLock() {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED ||
m_state == STATE_UNINITIALIZED);
if (m_state == STATE_UNINITIALIZED) {
template <typename I>
bool ManagedLock<I>::is_lock_owner() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return is_lock_owner(m_lock);
}
template <typename I>
-bool ManagedLock<I>::is_lock_owner(Mutex &lock) const {
+bool ManagedLock<I>::is_lock_owner(ceph::mutex &lock) const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
bool lock_owner;
void ManagedLock<I>::shut_down(Context *on_shut_down) {
ldout(m_cct, 10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(!is_state_shutdown());
if (m_state == STATE_WAITING_FOR_REGISTER) {
void ManagedLock<I>::acquire_lock(Context *on_acquired) {
int r = 0;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (is_state_shutdown()) {
r = -ESHUTDOWN;
} else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
void ManagedLock<I>::try_acquire_lock(Context *on_acquired) {
int r = 0;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (is_state_shutdown()) {
r = -ESHUTDOWN;
} else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
void ManagedLock<I>::release_lock(Context *on_released) {
int r = 0;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (is_state_shutdown()) {
r = -ESHUTDOWN;
} else if (m_state != STATE_UNLOCKED || !m_actions_contexts.empty()) {
template <typename I>
void ManagedLock<I>::reacquire_lock(Context *on_reacquired) {
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_state == STATE_WAITING_FOR_REGISTER) {
// restart the acquire lock process now that watch is valid
int r;
{
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (is_state_shutdown()) {
r = -ESHUTDOWN;
} else {
int r;
{
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (is_state_shutdown()) {
r = -ESHUTDOWN;
} else if (is_lock_owner(m_lock)) {
librados::ObjectReadOperation op;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME,
(m_mode == EXCLUSIVE ? LOCK_EXCLUSIVE :
LOCK_SHARED),
template <typename I>
void ManagedLock<I>::append_context(Action action, Context *ctx) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
for (auto &action_ctxs : m_actions_contexts) {
if (action == action_ctxs.first) {
template <typename I>
void ManagedLock<I>::execute_action(Action action, Context *ctx) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
append_context(action, ctx);
if (!is_transition_state()) {
template <typename I>
void ManagedLock<I>::execute_next_action() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
switch (get_active_action()) {
case ACTION_ACQUIRE_LOCK:
template <typename I>
typename ManagedLock<I>::Action ManagedLock<I>::get_active_action() const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
return m_actions_contexts.front().first;
}
template <typename I>
void ManagedLock<I>::complete_active_action(State next_state, int r) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
ActionContexts action_contexts(std::move(m_actions_contexts.front()));
m_actions_contexts.pop_front();
m_state = next_state;
- m_lock.Unlock();
+ m_lock.unlock();
for (auto ctx : action_contexts.second) {
ctx->complete(r);
}
- m_lock.Lock();
+ m_lock.lock();
if (!is_transition_state() && !m_actions_contexts.empty()) {
execute_next_action();
template <typename I>
bool ManagedLock<I>::is_state_shutdown() const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
switch (m_state) {
case STATE_PRE_SHUTTING_DOWN:
template <typename I>
void ManagedLock<I>::send_acquire_lock() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_state == STATE_LOCKED) {
complete_active_action(STATE_LOCKED, 0);
return;
void ManagedLock<I>::handle_post_acquire_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r < 0 && m_post_next_state == STATE_LOCKED) {
// release_lock without calling pre and post handlers
ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
m_work_queue, m_oid, m_cookie,
new FunctionContext([this, r](int ret) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(ret == 0);
complete_active_action(STATE_UNLOCKED, r);
}));
template <typename I>
void ManagedLock<I>::send_reacquire_lock() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_state != STATE_LOCKED) {
complete_active_action(m_state, 0);
void ManagedLock<I>::handle_reacquire_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_REACQUIRING);
if (r < 0) {
template <typename I>
void ManagedLock<I>::release_acquire_lock() {
- assert(m_lock.is_locked());
+  ceph_assert(ceph_mutex_is_locked(m_lock));
if (!is_state_shutdown()) {
// queue a release and re-acquire of the lock since cookie cannot
template <typename I>
void ManagedLock<I>::send_release_lock() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_state == STATE_UNLOCKED) {
complete_active_action(STATE_UNLOCKED, 0);
return;
ldout(m_cct, 10) << "r=" << r << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_PRE_RELEASING);
m_state = STATE_RELEASING;
}
void ManagedLock<I>::handle_release_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_RELEASING);
if (r >= 0 || r == -EBLACKLISTED || r == -ENOENT) {
void ManagedLock<I>::handle_post_release_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
complete_active_action(m_post_next_state, r);
}
template <typename I>
void ManagedLock<I>::send_shutdown() {
ldout(m_cct, 10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_state == STATE_UNLOCKED) {
m_state = STATE_SHUTTING_DOWN;
m_work_queue->queue(new FunctionContext([this](int r) {
ceph_assert(m_state == STATE_LOCKED);
m_state = STATE_PRE_SHUTTING_DOWN;
- m_lock.Unlock();
+ m_lock.unlock();
m_work_queue->queue(new C_ShutDownRelease(this), 0);
- m_lock.Lock();
+ m_lock.lock();
}
template <typename I>
void ManagedLock<I>::send_shutdown_release() {
ldout(m_cct, 10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_work_queue->queue(new FunctionContext([this](int r) {
pre_release_lock_handler(true, create_context_callback<
std::string cookie;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
cookie = m_cookie;
ceph_assert(m_state == STATE_PRE_SHUTTING_DOWN);
ActionContexts action_contexts;
{
- Mutex::Locker locker(m_lock);
- ceph_assert(m_lock.is_locked());
+ std::lock_guard locker{m_lock};
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_actions_contexts.size() == 1);
action_contexts = std::move(m_actions_contexts.front());
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
-#include "common/Mutex.h"
#include "cls/lock/cls_lock_types.h"
#include "librbd/watcher/Types.h"
#include "librbd/managed_lock/Types.h"
int assert_header_locked();
bool is_shutdown() const {
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
return is_state_shutdown();
}
protected:
- mutable Mutex m_lock;
+ mutable ceph::mutex m_lock;
inline void set_state_uninitialized() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_UNLOCKED);
m_state = STATE_UNINITIALIZED;
}
inline void set_state_initializing() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_UNINITIALIZED);
m_state = STATE_INITIALIZING;
}
inline void set_state_unlocked() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_INITIALIZING || m_state == STATE_RELEASING);
m_state = STATE_UNLOCKED;
}
inline void set_state_waiting_for_lock() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_ACQUIRING);
m_state = STATE_WAITING_FOR_LOCK;
}
inline void set_state_post_acquiring() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_ACQUIRING);
m_state = STATE_POST_ACQUIRING;
}
bool is_state_shutdown() const;
inline bool is_state_acquiring() const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_ACQUIRING;
}
inline bool is_state_post_acquiring() const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_POST_ACQUIRING;
}
inline bool is_state_releasing() const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_RELEASING;
}
inline bool is_state_pre_releasing() const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_PRE_RELEASING;
}
inline bool is_state_locked() const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_LOCKED;
}
inline bool is_state_waiting_for_lock() const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_WAITING_FOR_LOCK;
}
inline bool is_action_acquire_lock() const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
return get_active_action() == ACTION_ACQUIRE_LOCK;
}
ActionsContexts m_actions_contexts;
AsyncOpTracker m_async_op_tracker;
- bool is_lock_owner(Mutex &lock) const;
+ bool is_lock_owner(ceph::mutex &lock) const;
bool is_transition_state() const;
void append_context(Action action, Context *ctx);
template <typename I>
ObjectMap<I>::ObjectMap(I &image_ctx, uint64_t snap_id)
: m_image_ctx(image_ctx), m_snap_id(snap_id),
- m_lock(util::unique_lock_name("librbd::ObjectMap::lock", this)),
+    m_lock(ceph::make_shared_mutex(
+      util::unique_lock_name("librbd::ObjectMap::lock", this))),
m_update_guard(new UpdateGuard(m_image_ctx.cct)) {
}
template <typename I>
uint8_t ObjectMap<I>::operator[](uint64_t object_no) const
{
- RWLock::RLocker locker(m_lock);
+ std::shared_lock locker{m_lock};
ceph_assert(object_no < m_object_map.size());
return m_object_map[object_no];
}
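ObjectMap moves from RWLock to ceph::shared_mutex: RWLock::RLocker becomes std::shared_lock, RWLock::WLocker becomes std::unique_lock, and ceph::make_shared_mutex() carries the lockdep name. A self-contained sketch of the mapping (the type and member are invented for illustration):

    #include "common/ceph_mutex.h"
    #include <cstdint>
    #include <mutex>        // std::unique_lock
    #include <shared_mutex> // std::shared_lock

    struct Sketch {
      mutable ceph::shared_mutex lock =
          ceph::make_shared_mutex("librbd::Sketch::lock");
      uint64_t value = 0;

      uint64_t get() const {
        std::shared_lock locker{lock};   // was RWLock::RLocker
        return value;
      }
      void set(uint64_t v) {
        std::unique_lock locker{lock};   // was RWLock::WLocker
        value = v;
      }
    };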
template <typename I>
bool ObjectMap<I>::object_may_exist(uint64_t object_no) const
{
- ceph_assert(m_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
// Fall back to default logic if object map is disabled or invalid
if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
template <typename I>
bool ObjectMap<I>::object_may_not_exist(uint64_t object_no) const
{
- ceph_assert(m_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
// Fall back to default logic if object map is disabled or invalid
if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
template <typename I>
bool ObjectMap<I>::update_required(const ceph::BitVector<2>::Iterator& it,
uint8_t new_state) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
uint8_t state = *it;
if ((state == new_state) ||
(new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
template <typename I>
bool ObjectMap<I>::set_object_map(ceph::BitVector<2> &target_object_map) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
- ceph_assert(m_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock));
- RWLock::WLocker locker(m_lock);
+ std::unique_lock locker{m_lock};
m_object_map = target_object_map;
return true;
}
template <typename I>
void ObjectMap<I>::rollback(uint64_t snap_id, Context *on_finish) {
- ceph_assert(m_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
- RWLock::WLocker locker(m_lock);
+ std::unique_lock locker{m_lock};
object_map::SnapshotRollbackRequest *req =
new object_map::SnapshotRollbackRequest(m_image_ctx, snap_id, on_finish);
req->send();
template <typename I>
void ObjectMap<I>::snapshot_add(uint64_t snap_id, Context *on_finish) {
- ceph_assert(m_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
ceph_assert(snap_id != CEPH_NOSNAP);
template <typename I>
void ObjectMap<I>::snapshot_remove(uint64_t snap_id, Context *on_finish) {
- ceph_assert(m_image_ctx.image_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(m_image_ctx.image_lock));
ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
ceph_assert(snap_id != CEPH_NOSNAP);
template <typename I>
void ObjectMap<I>::aio_save(Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
- ceph_assert(m_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock));
- RWLock::RLocker locker(m_lock);
+ std::shared_lock locker{m_lock};
librados::ObjectWriteOperation op;
if (m_snap_id == CEPH_NOSNAP) {
template <typename I>
void ObjectMap<I>::aio_resize(uint64_t new_size, uint8_t default_object_state,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
- ceph_assert(m_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock));
ceph_assert(m_image_ctx.image_watcher != NULL);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
- ceph_assert(m_image_ctx.image_lock.is_locked());
- ceph_assert(m_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
+ ceph_assert(ceph_mutex_is_wlocked(m_lock));
BlockGuardCell *cell;
int r = m_update_guard->detain({op.start_object_no, op.end_object_no},
m_update_guard->release(cell, &block_ops);
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
- RWLock::WLocker locker(m_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
+ std::unique_lock locker{m_lock};
for (auto &op : block_ops) {
detained_aio_update(std::move(op));
}
const boost::optional<uint8_t> &current_state,
const ZTracer::Trace &parent_trace,
bool ignore_enoent, Context *on_finish) {
- ceph_assert(m_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
ceph_assert(m_image_ctx.image_watcher != nullptr);
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
stringify(static_cast<uint32_t>(*current_state)) : "")
<< "->" << static_cast<uint32_t>(new_state) << dendl;
if (snap_id == CEPH_NOSNAP) {
- ceph_assert(m_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(m_lock));
end_object_no = std::min(end_object_no, m_object_map.size());
if (start_object_no >= end_object_no) {
ldout(cct, 20) << "skipping update of invalid object map" << dendl;
uint8_t operator[](uint64_t object_no) const;
inline uint64_t size() const {
- RWLock::RLocker locker(m_lock);
+ std::shared_lock locker{m_lock};
return m_object_map.size();
}
inline void set_state(uint64_t object_no, uint8_t new_state,
const boost::optional<uint8_t> &current_state) {
- RWLock::WLocker locker(m_lock);
+ std::unique_lock locker{m_lock};
ceph_assert(object_no < m_object_map.size());
if (current_state && m_object_map[object_no] != *current_state) {
return;
const ZTracer::Trace &parent_trace, bool ignore_enoent,
T *callback_object) {
ceph_assert(start_object_no < end_object_no);
- RWLock::WLocker locker(m_lock);
+ std::unique_lock locker{m_lock};
if (snap_id == CEPH_NOSNAP) {
end_object_no = std::min(end_object_no, m_object_map.size());
ImageCtxT &m_image_ctx;
uint64_t m_snap_id;
- RWLock m_lock;
+ mutable ceph::shared_mutex m_lock;
ceph::BitVector<2> m_object_map;
UpdateGuard *m_update_guard = nullptr;
void send_acquire_exclusive_lock() {
// context can complete before owner_lock is unlocked
- RWLock &owner_lock(image_ctx.owner_lock);
- owner_lock.get_read();
- image_ctx.image_lock.get_read();
+ ceph::shared_mutex &owner_lock(image_ctx.owner_lock);
+ owner_lock.lock_shared();
+ image_ctx.image_lock.lock_shared();
if (image_ctx.read_only ||
(!permit_snapshot && image_ctx.snap_id != CEPH_NOSNAP)) {
- image_ctx.image_lock.put_read();
- owner_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
+ owner_lock.unlock_shared();
complete(-EROFS);
return;
}
- image_ctx.image_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
if (image_ctx.exclusive_lock == nullptr) {
send_local_request();
- owner_lock.put_read();
+ owner_lock.unlock_shared();
return;
} else if (image_ctx.image_watcher == nullptr) {
- owner_lock.put_read();
+ owner_lock.unlock_shared();
complete(-EROFS);
return;
}
if (image_ctx.exclusive_lock->is_lock_owner() &&
image_ctx.exclusive_lock->accept_requests()) {
send_local_request();
- owner_lock.put_read();
+ owner_lock.unlock_shared();
return;
}
} else {
image_ctx.exclusive_lock->try_acquire_lock(ctx);
}
- owner_lock.put_read();
+ owner_lock.unlock_shared();
}
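The "context can complete before owner_lock is unlocked" comment is why this function keeps manual locking instead of a scoped guard: complete() may destroy the request, and any guard still in scope would then unlock through a dangling reference. Such sites translate get_read()/put_read() one-for-one into lock_shared()/unlock_shared(), e.g. (condensed):

    owner_lock.lock_shared();        // was owner_lock.get_read()
    if (read_only) {
      owner_lock.unlock_shared();    // release before complete():
      complete(-EROFS);              // complete() may delete *this
      return;
    }
    owner_lock.unlock_shared();      // was owner_lock.put_read()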
void handle_acquire_exclusive_lock(int r) {
}
// context can complete before owner_lock is unlocked
- RWLock &owner_lock(image_ctx.owner_lock);
- owner_lock.get_read();
+ ceph::shared_mutex &owner_lock(image_ctx.owner_lock);
+ owner_lock.lock_shared();
if (image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner()) {
send_local_request();
- owner_lock.put_read();
+ owner_lock.unlock_shared();
return;
}
send_remote_request();
- owner_lock.put_read();
+ owner_lock.unlock_shared();
}
void send_remote_request() {
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << dendl;
}
void send_local_request() {
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << dendl;
}
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
if (m_image_ctx.parent_md.spec.pool_id == -1) {
lderr(cct) << "image has no parent" << dendl;
return -EINVAL;
template <typename I>
void Operations<I>::execute_flatten(ProgressContext &prog_ctx,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
return;
}
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
// can't flatten a non-clone
if (m_image_ctx.parent_md.spec.pool_id == -1) {
lderr(cct) << "image has no parent" << dendl;
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EINVAL);
return;
}
if (m_image_ctx.snap_id != CEPH_NOSNAP) {
lderr(cct) << "snapshots cannot be flattened" << dendl;
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EROFS);
return;
}
uint64_t overlap_objects = Striper::get_num_objects(m_image_ctx.layout,
overlap);
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
operation::FlattenRequest<I> *req = new operation::FlattenRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), overlap_objects,
template <typename I>
void Operations<I>::execute_rebuild_object_map(ProgressContext &prog_ctx,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
void Operations<I>::object_map_iterate(ProgressContext &prog_ctx,
operation::ObjectIterateWork<I> handle_mismatch,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
} else {
C_SaferCond cond_ctx;
{
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ std::shared_lock owner_lock{m_image_ctx.owner_lock};
execute_rename(dstname, &cond_ctx);
}
template <typename I>
void Operations<I>::execute_rename(const std::string &dest_name,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
return;
}
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.name == dest_name) {
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EEXIST);
return;
}
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": dest_name=" << dest_name
m_image_ctx.image_watcher->register_watch(on_finish);
});
on_finish = new FunctionContext([this, dest_name, on_finish](int r) {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
operation::RenameRequest<I> *req = new operation::RenameRequest<I>(
m_image_ctx, on_finish, dest_name);
req->send();
int Operations<I>::resize(uint64_t size, bool allow_shrink, ProgressContext& prog_ctx) {
CephContext *cct = m_image_ctx.cct;
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
ldout(cct, 5) << this << " " << __func__ << ": "
<< "size=" << m_image_ctx.size << ", "
<< "new_size=" << size << dendl;
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
void Operations<I>::execute_resize(uint64_t size, bool allow_shrink, ProgressContext &prog_ctx,
Context *on_finish,
uint64_t journal_op_tid) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
ldout(cct, 5) << this << " " << __func__ << ": "
<< "size=" << m_image_ctx.size << ", "
<< "new_size=" << size << dendl;
if (m_image_ctx.snap_id != CEPH_NOSNAP || m_image_ctx.read_only ||
m_image_ctx.operations_disabled) {
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EROFS);
return;
} else if (m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock) &&
!ObjectMap<>::is_compatible(m_image_ctx.layout, size)) {
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EINVAL);
return;
}
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
operation::ResizeRequest<I> *req = new operation::ResizeRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), size, allow_shrink,
return;
}
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP) {
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EEXIST);
return;
}
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
C_InvokeAsyncRequest<I> *req = new C_InvokeAsyncRequest<I>(
m_image_ctx, "snap_create", true,
Context *on_finish,
uint64_t journal_op_tid,
bool skip_object_map) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
return;
}
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP) {
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EEXIST);
return;
}
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
operation::SnapshotCreateRequest<I> *req =
new operation::SnapshotCreateRequest<I>(
C_SaferCond cond_ctx;
{
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
{
// need to drop image_lock before invalidating cache
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
if (!m_image_ctx.snap_exists) {
return -ENOENT;
}
const std::string &snap_name,
ProgressContext& prog_ctx,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
return;
}
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name);
if (snap_id == CEPH_NOSNAP) {
lderr(cct) << "No such snapshot found." << dendl;
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-ENOENT);
return;
}
uint64_t new_size = m_image_ctx.get_image_size(snap_id);
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
// async mode used for journal replay
operation::SnapshotRollbackRequest<I> *request =
}
// quickly filter out duplicate ops
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.get_snap_id(snap_namespace, snap_name) == CEPH_NOSNAP) {
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-ENOENT);
return;
}
bool proxy_op = ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0 ||
(m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0);
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
if (proxy_op) {
C_InvokeAsyncRequest<I> *req = new C_InvokeAsyncRequest<I>(
{-ENOENT}, on_finish);
req->send();
} else {
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ std::shared_lock owner_lock{m_image_ctx.owner_lock};
execute_snap_remove(snap_namespace, snap_name, on_finish);
}
}
void Operations<I>::execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
{
if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
return;
}
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name);
if (snap_id == CEPH_NOSNAP) {
lderr(m_image_ctx.cct) << "No such snapshot found." << dendl;
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-ENOENT);
return;
}
bool is_protected;
int r = m_image_ctx.is_snap_protected(snap_id, &is_protected);
if (r < 0) {
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(r);
return;
} else if (is_protected) {
lderr(m_image_ctx.cct) << "snapshot is protected" << dendl;
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EBUSY);
return;
}
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
operation::SnapshotRemoveRequest<I> *req =
new operation::SnapshotRemoveRequest<I>(
return r;
{
- RWLock::RLocker l(m_image_ctx.image_lock);
+ std::shared_lock l{m_image_ctx.image_lock};
snap_id = m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(), srcname);
if (snap_id == CEPH_NOSNAP) {
return -ENOENT;
} else {
C_SaferCond cond_ctx;
{
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ std::shared_lock owner_lock{m_image_ctx.owner_lock};
execute_snap_rename(snap_id, dstname, &cond_ctx);
}
void Operations<I>::execute_snap_rename(const uint64_t src_snap_id,
const std::string &dest_snap_name,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
if ((m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
return;
}
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(),
dest_snap_name) != CEPH_NOSNAP) {
// Renaming is supported for snapshots from user namespace only.
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EEXIST);
return;
}
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": "
}
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
bool is_protected;
r = m_image_ctx.is_snap_protected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
&is_protected);
} else {
C_SaferCond cond_ctx;
{
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ std::shared_lock owner_lock{m_image_ctx.owner_lock};
execute_snap_protect(snap_namespace, snap_name, &cond_ctx);
}
void Operations<I>::execute_snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
return;
}
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
bool is_protected;
int r = m_image_ctx.is_snap_protected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
&is_protected);
if (r < 0) {
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(r);
return;
} else if (is_protected) {
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EBUSY);
return;
}
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
}
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
bool is_unprotected;
r = m_image_ctx.is_snap_unprotected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
&is_unprotected);
} else {
C_SaferCond cond_ctx;
{
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ std::shared_lock owner_lock{m_image_ctx.owner_lock};
execute_snap_unprotect(snap_namespace, snap_name, &cond_ctx);
}
void Operations<I>::execute_snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
return;
}
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
bool is_unprotected;
int r = m_image_ctx.is_snap_unprotected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
&is_unprotected);
if (r < 0) {
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(r);
return;
} else if (is_unprotected) {
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EINVAL);
return;
}
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
C_SaferCond limit_ctx;
{
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ std::shared_lock owner_lock{m_image_ctx.owner_lock};
r = prepare_image_update(true);
if (r < 0) {
return r;
template <typename I>
void Operations<I>::execute_snap_set_limit(const uint64_t limit,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": limit=" << limit
return -EINVAL;
}
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
if (enabled && (features & m_image_ctx.features) != 0) {
lderr(cct) << "one or more requested features are already enabled"
<< dendl;
// when acquiring the exclusive lock in case the journal is corrupt
bool disabling_journal = false;
if (!enabled && ((features & RBD_FEATURE_JOURNALING) != 0)) {
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::unique_lock image_locker{m_image_ctx.image_lock};
m_image_ctx.set_journal_policy(new journal::DisabledPolicy());
disabling_journal = true;
}
BOOST_SCOPE_EXIT_ALL( (this)(disabling_journal) ) {
if (disabling_journal) {
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::unique_lock image_locker{m_image_ctx.image_lock};
m_image_ctx.set_journal_policy(
new journal::StandardPolicy<I>(&m_image_ctx));
}
if (enabled && (features & RBD_FEATURE_JOURNALING) != 0) {
C_SaferCond cond_ctx;
{
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ std::shared_lock owner_lock{m_image_ctx.owner_lock};
r = prepare_image_update(true);
if (r < 0) {
return r;
void Operations<I>::execute_update_features(uint64_t features, bool enabled,
Context *on_finish,
uint64_t journal_op_tid) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
C_SaferCond metadata_ctx;
{
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ std::shared_lock owner_lock{m_image_ctx.owner_lock};
r = prepare_image_update(true);
if (r < 0) {
return r;
void Operations<I>::execute_metadata_set(const std::string &key,
const std::string &value,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": key=" << key << ", value="
C_SaferCond metadata_ctx;
{
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ std::shared_lock owner_lock{m_image_ctx.owner_lock};
r = prepare_image_update(true);
if (r < 0) {
return r;
template <typename I>
void Operations<I>::execute_metadata_remove(const std::string &key,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": key=" << key << dendl;
}
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
if (m_image_ctx.migration_info.empty()) {
lderr(cct) << "image has no migrating parent" << dendl;
return -EINVAL;
template <typename I>
void Operations<I>::execute_migrate(ProgressContext &prog_ctx,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
return;
}
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.migration_info.empty()) {
lderr(cct) << "image has no migrating parent" << dendl;
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EINVAL);
return;
}
if (m_image_ctx.snap_id != CEPH_NOSNAP) {
lderr(cct) << "snapshots cannot be migrated" << dendl;
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EROFS);
return;
}
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
operation::MigrateRequest<I> *req = new operation::MigrateRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), prog_ctx);
void Operations<I>::execute_sparsify(size_t sparse_size,
ProgressContext &prog_ctx,
Context *on_finish) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
template <typename I>
int Operations<I>::prepare_image_update(bool request_lock) {
- ceph_assert(m_image_ctx.owner_lock.is_locked() &&
- !m_image_ctx.owner_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_rlocked(m_image_ctx.owner_lock));
if (m_image_ctx.image_watcher == nullptr) {
return -EROFS;
}
// need to upgrade to a write lock
C_SaferCond ctx;
- m_image_ctx.owner_lock.put_read();
+ m_image_ctx.owner_lock.unlock_shared();
bool attempting_lock = false;
{
- RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
+ std::unique_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr &&
(!m_image_ctx.exclusive_lock->is_lock_owner() ||
!m_image_ctx.exclusive_lock->accept_requests())) {
r = ctx.wait();
}
- m_image_ctx.owner_lock.get_read();
+ m_image_ctx.owner_lock.lock_shared();
if (attempting_lock && m_image_ctx.exclusive_lock != nullptr) {
m_image_ctx.exclusive_lock->unblock_requests();
}
#include "include/Context.h"
#include "common/ceph_context.h"
#include "common/Finisher.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include <map>
#include <utility>
namespace librbd {
struct TaskFinisherSingleton {
- Mutex m_lock;
+ ceph::mutex m_lock = ceph::make_mutex("librbd::TaskFinisher::m_lock");
SafeTimer *m_safe_timer;
Finisher *m_finisher;
- explicit TaskFinisherSingleton(CephContext *cct)
- : m_lock("librbd::TaskFinisher::m_lock") {
+ explicit TaskFinisherSingleton(CephContext *cct) {
m_safe_timer = new SafeTimer(cct, m_lock, false);
m_safe_timer->init();
m_finisher = new Finisher(cct, "librbd::TaskFinisher::m_finisher", "taskfin_librbd");
}
virtual ~TaskFinisherSingleton() {
{
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
m_safe_timer->shutdown();
delete m_safe_timer;
}
}
void cancel(const Task& task) {
- Mutex::Locker l(*m_lock);
+ std::lock_guard l{*m_lock};
typename TaskContexts::iterator it = m_task_contexts.find(task);
if (it != m_task_contexts.end()) {
delete it->second.first;
void cancel_all(Context *comp) {
{
- Mutex::Locker l(*m_lock);
+ std::lock_guard l{*m_lock};
for (typename TaskContexts::iterator it = m_task_contexts.begin();
it != m_task_contexts.end(); ++it) {
delete it->second.first;
}
bool add_event_after(const Task& task, double seconds, Context *ctx) {
- Mutex::Locker l(*m_lock);
+ std::lock_guard l{*m_lock};
if (m_task_contexts.count(task) != 0) {
// task already scheduled on finisher or timer
delete ctx;
}
bool queue(const Task& task, Context *ctx) {
- Mutex::Locker l(*m_lock);
+ std::lock_guard l{*m_lock};
typename TaskContexts::iterator it = m_task_contexts.find(task);
if (it != m_task_contexts.end()) {
if (it->second.second != NULL) {
CephContext &m_cct;
- Mutex *m_lock;
+ ceph::mutex *m_lock;
Finisher *m_finisher;
SafeTimer *m_safe_timer;
void complete(const Task& task) {
Context *ctx = NULL;
{
- Mutex::Locker l(*m_lock);
+ std::lock_guard l{*m_lock};
typename TaskContexts::iterator it = m_task_contexts.find(task);
if (it != m_task_contexts.end()) {
ctx = it->second.first;
const string& oid)
: m_ioctx(ioctx), m_work_queue(work_queue), m_oid(oid),
m_cct(reinterpret_cast<CephContext *>(ioctx.cct())),
- m_watch_lock(util::unique_lock_name("librbd::Watcher::m_watch_lock", this)),
+    m_watch_lock(ceph::make_shared_mutex(
+      util::unique_lock_name("librbd::Watcher::m_watch_lock", this))),
m_watch_handle(0), m_notifier(work_queue, ioctx, oid),
m_watch_state(WATCH_STATE_IDLE), m_watch_ctx(*this) {
}
Watcher::~Watcher() {
- RWLock::RLocker l(m_watch_lock);
+ std::shared_lock l{m_watch_lock};
ceph_assert(is_unregistered(m_watch_lock));
}
void Watcher::register_watch(Context *on_finish) {
ldout(m_cct, 10) << dendl;
- RWLock::WLocker watch_locker(m_watch_lock);
+ std::unique_lock watch_locker{m_watch_lock};
ceph_assert(is_unregistered(m_watch_lock));
m_watch_state = WATCH_STATE_REGISTERING;
m_watch_blacklisted = false;
bool watch_error = false;
Context *unregister_watch_ctx = nullptr;
{
- RWLock::WLocker watch_locker(m_watch_lock);
+ std::unique_lock watch_locker{m_watch_lock};
ceph_assert(m_watch_state == WATCH_STATE_REGISTERING);
m_watch_state = WATCH_STATE_IDLE;
ldout(m_cct, 10) << dendl;
{
- RWLock::WLocker watch_locker(m_watch_lock);
+ std::unique_lock watch_locker{m_watch_lock};
if (m_watch_state != WATCH_STATE_IDLE) {
ldout(m_cct, 10) << "delaying unregister until register completed"
<< dendl;
}
bool Watcher::notifications_blocked() const {
- RWLock::RLocker locker(m_watch_lock);
+ std::shared_lock locker{m_watch_lock};
bool blocked = (m_blocked_count > 0);
ldout(m_cct, 5) << "blocked=" << blocked << dendl;
void Watcher::block_notifies(Context *on_finish) {
{
- RWLock::WLocker locker(m_watch_lock);
+ std::unique_lock locker{m_watch_lock};
++m_blocked_count;
ldout(m_cct, 5) << "blocked_count=" << m_blocked_count << dendl;
}
}
void Watcher::unblock_notifies() {
- RWLock::WLocker locker(m_watch_lock);
+ std::unique_lock locker{m_watch_lock};
ceph_assert(m_blocked_count > 0);
--m_blocked_count;
ldout(m_cct, 5) << "blocked_count=" << m_blocked_count << dendl;
}
std::string Watcher::get_oid() const {
- RWLock::RLocker locker(m_watch_lock);
+ std::shared_lock locker{m_watch_lock};
return m_oid;
}
void Watcher::set_oid(const string& oid) {
- RWLock::WLocker watch_locker(m_watch_lock);
+ std::unique_lock watch_locker{m_watch_lock};
ceph_assert(is_unregistered(m_watch_lock));
m_oid = oid;
void Watcher::handle_error(uint64_t handle, int err) {
lderr(m_cct) << "handle=" << handle << ": " << cpp_strerror(err) << dendl;
- RWLock::WLocker watch_locker(m_watch_lock);
+ std::unique_lock watch_locker{m_watch_lock};
m_watch_error = true;
if (is_registered(m_watch_lock)) {
Context *unregister_watch_ctx = nullptr;
{
- RWLock::WLocker watch_locker(m_watch_lock);
+ std::unique_lock watch_locker{m_watch_lock};
ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
if (m_unregister_watch_ctx != nullptr) {
bool watch_error = false;
Context *unregister_watch_ctx = nullptr;
{
- RWLock::WLocker watch_locker(m_watch_lock);
+ std::unique_lock watch_locker{m_watch_lock};
ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
m_watch_blacklisted = false;
bool watch_error = false;
Context *unregister_watch_ctx = nullptr;
{
- RWLock::WLocker watch_locker(m_watch_lock);
+ std::unique_lock watch_locker{m_watch_lock};
ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
if (m_unregister_watch_ctx != nullptr) {
#define CEPH_LIBRBD_WATCHER_H
#include "common/AsyncOpTracker.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "common/RWLock.h"
#include "include/rados/librados.hpp"
#include "librbd/watcher/Notifier.h"
void set_oid(const string& oid);
uint64_t get_watch_handle() const {
- RWLock::RLocker watch_locker(m_watch_lock);
+ std::shared_lock watch_locker{m_watch_lock};
return m_watch_handle;
}
bool is_registered() const {
- RWLock::RLocker locker(m_watch_lock);
+ std::shared_lock locker{m_watch_lock};
return is_registered(m_watch_lock);
}
bool is_unregistered() const {
- RWLock::RLocker locker(m_watch_lock);
+ std::shared_lock locker{m_watch_lock};
return is_unregistered(m_watch_lock);
}
bool is_blacklisted() const {
- RWLock::RLocker locker(m_watch_lock);
+ std::shared_lock locker{m_watch_lock};
return m_watch_blacklisted;
}
ContextWQ *m_work_queue;
std::string m_oid;
CephContext *m_cct;
- mutable RWLock m_watch_lock;
+ mutable ceph::shared_mutex m_watch_lock;
uint64_t m_watch_handle;
watcher::Notifier m_notifier;
AsyncOpTracker m_async_op_tracker;
- bool is_registered(const RWLock&) const {
+ bool is_registered(const ceph::shared_mutex&) const {
return (m_watch_state == WATCH_STATE_IDLE && m_watch_handle != 0);
}
- bool is_unregistered(const RWLock&) const {
+ bool is_unregistered(const ceph::shared_mutex&) const {
return (m_watch_state == WATCH_STATE_IDLE && m_watch_handle == 0);
}
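The two private overloads keep the pre-existing proof-of-lock idiom: the public accessor takes the lock and then passes it to a helper whose unused ceph::shared_mutex& parameter documents that the lock must already be held. A minimal sketch of the idiom, with std::shared_mutex standing in for ceph::shared_mutex and a simplified registration check:

#include <cstdint>
#include <shared_mutex>

class WatcherSketch {
public:
  bool is_registered() const {
    std::shared_lock locker{m_watch_lock};
    return is_registered(m_watch_lock);  // lock held, pass it as proof
  }

private:
  // Unused parameter: callers must already hold m_watch_lock.
  bool is_registered(const std::shared_mutex&) const {
    return m_watch_handle != 0;
  }

  mutable std::shared_mutex m_watch_lock;
  uint64_t m_watch_handle = 0;
};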
#include "include/rados/librados.hpp"
#include "include/interval_set.h"
#include "common/errno.h"
+#include "common/Cond.h"
#include "common/Throttle.h"
#include "osdc/Striper.h"
#include "librados/snap_set_diff.h"
// ensure previous writes are visible to listsnaps
C_SaferCond flush_ctx;
{
- RWLock::RLocker owner_locker(ictx->owner_lock);
+ std::shared_lock owner_locker{ictx->owner_lock};
auto aio_comp = io::AioCompletion::create_and_start(&flush_ctx, ictx,
io::AIO_TYPE_FLUSH);
auto req = io::ImageDispatchSpec<I>::create_flush_request(
return r;
}
- ictx->image_lock.get_read();
+ ictx->image_lock.lock_shared();
r = clip_io(ictx, off, &len);
- ictx->image_lock.put_read();
+ ictx->image_lock.unlock_shared();
if (r < 0) {
return r;
}
uint64_t from_size = 0;
uint64_t end_size;
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
head_ctx.dup(m_image_ctx.data_ctx);
if (m_from_snap_name) {
from_snap_id = m_image_ctx.get_snap_id(m_from_snap_namespace, m_from_snap_name);
bool fast_diff_enabled = false;
BitVector<2> object_diff_state;
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
if (m_whole_object && (m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) {
r = diff_object_map(from_snap_id, end_snap_id, &object_diff_state);
if (r < 0) {
DiffContext diff_context(m_image_ctx, m_callback, m_callback_arg,
m_whole_object, from_snap_id, end_snap_id);
if (m_include_parent && from_snap_id == 0) {
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
uint64_t overlap = 0;
m_image_ctx.get_parent_overlap(m_image_ctx.snap_id, &overlap);
r = 0;
template <typename I>
int DiffIterate<I>::diff_object_map(uint64_t from_snap_id, uint64_t to_snap_id,
BitVector<2>* object_diff_state) {
- ceph_assert(m_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
CephContext* cct = m_image_ctx.cct;
bool diff_from_start = (from_snap_id == 0);
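The is_locked()/is_wlocked() member checks become the ceph_mutex_is_locked()/ceph_mutex_is_wlocked() macros. The practical difference, assuming the usual common/ceph_mutex.h debug/release split, is that the macros only perform a real check against the instrumented debug wrapper; in release builds they expand to true, so the surrounding ceph_assert() compiles away. A sketch of that split, modeled on (not copied verbatim from) common/ceph_mutex.h:

// Hypothetical reconstruction of the debug/release macro split.
#ifdef CEPH_DEBUG_MUTEX
  // ceph::mutex is an instrumented wrapper that tracks its own state.
  #define ceph_mutex_is_locked(m)   ((m).is_locked())
  #define ceph_mutex_is_wlocked(m)  ((m).is_wlocked())
#else
  // ceph::mutex is a bare std::mutex; the checks become no-ops.
  #define ceph_mutex_is_locked(m)   true
  #define ceph_mutex_is_wlocked(m)  true
#endif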
template <typename I>
snap_t get_group_snap_id(I* ictx,
const cls::rbd::SnapshotNamespace& in_snap_namespace) {
- ceph_assert(ictx->image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(ictx->image_lock));
auto it = ictx->snap_ids.lower_bound({in_snap_namespace, ""});
if (it != ictx->snap_ids.end() && it->first.first == in_snap_namespace) {
return it->second;
on_finishes[i] = new C_SaferCond;
std::string snap_name;
- ictx->image_lock.get_read();
+ ictx->image_lock.lock_shared();
snap_t snap_id = get_group_snap_id(ictx, ne);
r = ictx->get_snap_name(snap_id, &snap_name);
- ictx->image_lock.put_read();
+ ictx->image_lock.unlock_shared();
if (r >= 0) {
ldout(cct, 20) << "removing individual snapshot from image " << ictx->name
ldout(cct, 20) << "Requesting exclusive locks for images" << dendl;
for (auto ictx: ictxs) {
- RWLock::RLocker owner_lock(ictx->owner_lock);
+ std::shared_lock owner_lock{ictx->owner_lock};
if (ictx->exclusive_lock != nullptr) {
ictx->exclusive_lock->block_requests(-EBUSY);
}
}
for (int i = 0; i < snap_count; ++i) {
ImageCtx *ictx = ictxs[i];
- RWLock::RLocker owner_lock(ictx->owner_lock);
+ std::shared_lock owner_lock{ictx->owner_lock};
on_finishes[i] = new C_SaferCond;
if (ictx->exclusive_lock != nullptr) {
ImageCtx *ictx = ictxs[i];
on_finishes[i] = new C_SaferCond;
- RWLock::RLocker owner_locker(ictx->owner_lock);
+ std::shared_lock owner_locker{ictx->owner_lock};
std::string snap_name;
- ictx->image_lock.get_read();
+ ictx->image_lock.lock_shared();
snap_t snap_id = get_group_snap_id(ictx, ne);
r = ictx->get_snap_name(snap_id, &snap_name);
- ictx->image_lock.put_read();
+ ictx->image_lock.unlock_shared();
if (r >= 0) {
ldout(cct, 20) << "rolling back to individual snapshot for image " << ictx->name
ldout(cct, 20) << "Requesting exclusive locks for images" << dendl;
for (auto ictx: ictxs) {
- RWLock::RLocker owner_lock(ictx->owner_lock);
+ std::shared_lock owner_lock{ictx->owner_lock};
if (ictx->exclusive_lock != nullptr) {
ictx->exclusive_lock->block_requests(-EBUSY);
}
}
for (int i = 0; i < image_count; ++i) {
ImageCtx *ictx = ictxs[i];
- RWLock::RLocker owner_lock(ictx->owner_lock);
+ std::shared_lock owner_lock{ictx->owner_lock};
on_finishes[i] = new C_SaferCond;
if (ictx->exclusive_lock != nullptr) {
ret_code = r;
} else {
ImageCtx *ictx = ictxs[i];
- ictx->image_lock.get_read();
+ ictx->image_lock.lock_shared();
snap_t snap_id = get_group_snap_id(ictx, ne);
- ictx->image_lock.put_read();
+ ictx->image_lock.unlock_shared();
if (snap_id == CEPH_NOSNAP) {
ldout(cct, 20) << "Couldn't find created snapshot with namespace: "
<< ne << dendl;
on_finishes[i] = new C_SaferCond;
std::string snap_name;
- ictx->image_lock.get_read();
+ ictx->image_lock.lock_shared();
snap_t snap_id = get_group_snap_id(ictx, ne);
r = ictx->get_snap_name(snap_id, &snap_name);
- ictx->image_lock.put_read();
+ ictx->image_lock.unlock_shared();
if (r >= 0) {
ictx->operations->snap_remove(ne, snap_name.c_str(), on_finishes[i]);
} else {
return r;
}
- RWLock::RLocker image_locker(ictx->image_lock);
+ std::shared_lock image_locker{ictx->image_lock};
*op_features = ictx->op_features;
return 0;
}
return r;
}
- RWLock::RLocker image_locker(ictx->image_lock);
+ std::shared_lock image_locker{ictx->image_lock};
bool release_image_lock = false;
BOOST_SCOPE_EXIT_ALL(ictx, &release_image_lock) {
if (release_image_lock) {
- ictx->parent->image_lock.put_read();
+ ictx->parent->image_lock.unlock_shared();
}
};
auto parent = ictx->parent;
if (!ictx->migration_info.empty() && ictx->parent != nullptr) {
release_image_lock = true;
- ictx->parent->image_lock.get_read();
+ ictx->parent->image_lock.lock_shared();
parent = ictx->parent->parent;
}
parent_image->pool_name = parent->md_ctx.get_pool_name();
parent_image->pool_namespace = parent->md_ctx.get_namespace();
- RWLock::RLocker parent_image_locker(parent->image_lock);
+ std::shared_lock parent_image_locker{parent->image_lock};
parent_snap->id = parent->snap_id;
parent_snap->namespace_type = RBD_SNAP_NAMESPACE_TYPE_USER;
if (parent->snap_id != CEPH_NOSNAP) {
int Image<I>::list_descendants(
I *ictx, const std::optional<size_t> &max_level,
std::vector<librbd::linked_image_spec_t> *images) {
- RWLock::RLocker l(ictx->image_lock);
+ std::shared_lock l{ictx->image_lock};
std::vector<librados::snap_t> snap_ids;
if (ictx->snap_id != CEPH_NOSNAP) {
snap_ids.push_back(ictx->snap_id);
uint64_t features;
uint64_t src_size;
{
- RWLock::RLocker image_locker(src->image_lock);
+ std::shared_lock image_locker{src->image_lock};
if (!src->migration_info.empty()) {
lderr(cct) << "cannot deep copy migrating image" << dendl;
if (flatten > 0) {
parent_spec.pool_id = -1;
} else {
- RWLock::RLocker image_locker(src->image_lock);
+ std::shared_lock image_locker{src->image_lock};
// use oldest snapshot or HEAD for parent spec
if (!src->snap_info.empty()) {
C_SaferCond lock_ctx;
{
- RWLock::WLocker locker(dest->owner_lock);
+ std::unique_lock locker{dest->owner_lock};
if (dest->exclusive_lock == nullptr ||
dest->exclusive_lock->is_lock_owner()) {
librados::snap_t snap_id_start = 0;
librados::snap_t snap_id_end;
{
- RWLock::RLocker image_locker(src->image_lock);
+ std::shared_lock image_locker{src->image_lock};
snap_id_end = src->snap_id;
}
uint64_t snap_id = CEPH_NOSNAP;
std::string name(snap_name == nullptr ? "" : snap_name);
if (!name.empty()) {
- RWLock::RLocker image_locker(ictx->image_lock);
+ std::shared_lock image_locker{ictx->image_lock};
snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace{},
snap_name);
if (snap_id == CEPH_NOSNAP) {
#include "include/stringify.h"
#include "common/dout.h"
#include "common/errno.h"
+#include "common/Cond.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
ProgressContext *prog_ctx)
: m_io_ctx(io_ctx), m_header_oid(header_oid), m_state(state),
m_prog_ctx(prog_ctx), m_cct(reinterpret_cast<CephContext*>(io_ctx.cct())),
- m_lock(util::unique_lock_name("librbd::api::MigrationProgressContext",
- this)) {
+ m_lock(ceph::make_mutex(
+ util::unique_lock_name("librbd::api::MigrationProgressContext",
+ this))) {
ceph_assert(m_prog_ctx != nullptr);
}
ProgressContext *m_prog_ctx;
CephContext* m_cct;
- mutable Mutex m_lock;
- Cond m_cond;
+ mutable ceph::mutex m_lock;
+ ceph::condition_variable m_cond;
std::string m_state_description;
bool m_pending_update = false;
int m_in_flight_state_updates = 0;
void send_state_description_update(const std::string &description) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (description == m_state_description) {
return;
void set_state_description() {
ldout(m_cct, 20) << "state_description=" << m_state_description << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
librados::ObjectWriteOperation op;
cls_client::migration_set_state(&op, m_state, m_state_description);
void handle_set_state_description(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_in_flight_state_updates--;
set_state_description();
m_pending_update = false;
} else {
- m_cond.Signal();
+ m_cond.notify_all();
}
}
void wait_for_in_flight_updates() {
- Mutex::Locker locker(m_lock);
+ std::unique_lock locker{m_lock};
ldout(m_cct, 20) << "m_in_flight_state_updates="
<< m_in_flight_state_updates << dendl;
-
m_pending_update = false;
- while (m_in_flight_state_updates > 0) {
- m_cond.Wait(m_lock);
- }
+ m_cond.wait(locker, [this] { return m_in_flight_state_updates <= 0; });
}
};
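wait_for_in_flight_updates() also changes shape: the hand-rolled while/Cond::Wait() loop becomes a single condition-variable wait with a predicate, and the guard must be std::unique_lock rather than std::lock_guard, because the condition variable has to unlock and relock it while waiting. A minimal self-contained sketch of the same pattern, with std::mutex and std::condition_variable standing in for the ceph wrappers:

#include <condition_variable>
#include <mutex>

class UpdateTracker {
public:
  void start_update() {
    std::lock_guard locker{m_lock};
    ++m_in_flight;
  }

  void finish_update() {
    std::lock_guard locker{m_lock};
    --m_in_flight;
    m_cond.notify_all();              // was: m_cond.Signal();
  }

  void wait_for_in_flight_updates() {
    std::unique_lock locker{m_lock};  // unique_lock, not lock_guard
    // was: while (m_in_flight > 0) { m_cond.Wait(m_lock); }
    m_cond.wait(locker, [this] { return m_in_flight <= 0; });
  }

private:
  std::mutex m_lock;
  std::condition_variable m_cond;
  int m_in_flight = 0;
};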
uint64_t features;
{
- RWLock::RLocker image_locker(image_ctx->image_lock);
+ std::shared_lock image_locker{image_ctx->image_lock};
features = image_ctx->features;
}
opts.get(RBD_IMAGE_OPTION_FEATURES, &features);
m_prog_ctx);
r = dst_image_ctx->operations->migrate(prog_ctx);
if (r == -EROFS) {
- RWLock::RLocker owner_locker(dst_image_ctx->owner_lock);
+ std::shared_lock owner_locker{dst_image_ctx->owner_lock};
if (dst_image_ctx->exclusive_lock != nullptr &&
!dst_image_ctx->exclusive_lock->accept_ops()) {
ldout(m_cct, 5) << "lost exclusive lock, retrying remote" << dendl;
int r;
- m_src_image_ctx->owner_lock.get_read();
+ m_src_image_ctx->owner_lock.lock_shared();
if (m_src_image_ctx->exclusive_lock != nullptr &&
!m_src_image_ctx->exclusive_lock->is_lock_owner()) {
C_SaferCond ctx;
m_src_image_ctx->exclusive_lock->acquire_lock(&ctx);
- m_src_image_ctx->owner_lock.put_read();
+ m_src_image_ctx->owner_lock.unlock_shared();
r = ctx.wait();
if (r < 0) {
lderr(m_cct) << "error acquiring exclusive lock: " << cpp_strerror(r)
return r;
}
} else {
- m_src_image_ctx->owner_lock.put_read();
+ m_src_image_ctx->owner_lock.unlock_shared();
}
group_info_t group_info;
}
for (auto &snap : snaps) {
- RWLock::RLocker image_locker(m_src_image_ctx->image_lock);
+ std::shared_lock image_locker{m_src_image_ctx->image_lock};
cls::rbd::ParentImageSpec parent_spec{m_src_image_ctx->md_ctx.get_id(),
m_src_image_ctx->md_ctx.get_namespace(),
m_src_image_ctx->id, snap.id};
int Migration<I>::v2_unlink_src_image() {
ldout(m_cct, 10) << dendl;
- m_src_image_ctx->owner_lock.get_read();
+ m_src_image_ctx->owner_lock.lock_shared();
if (m_src_image_ctx->exclusive_lock != nullptr &&
m_src_image_ctx->exclusive_lock->is_lock_owner()) {
C_SaferCond ctx;
m_src_image_ctx->exclusive_lock->release_lock(&ctx);
- m_src_image_ctx->owner_lock.put_read();
+ m_src_image_ctx->owner_lock.unlock_shared();
int r = ctx.wait();
if (r < 0) {
lderr(m_cct) << "error releasing exclusive lock: " << cpp_strerror(r)
return r;
}
} else {
- m_src_image_ctx->owner_lock.put_read();
+ m_src_image_ctx->owner_lock.unlock_shared();
}
int r = Trash<I>::move(m_src_io_ctx, RBD_TRASH_IMAGE_SOURCE_MIGRATION,
uint64_t size;
cls::rbd::ParentImageSpec parent_spec;
{
- RWLock::RLocker image_locker(m_src_image_ctx->image_lock);
+ std::shared_lock image_locker{m_src_image_ctx->image_lock};
size = m_src_image_ctx->size;
// use oldest snapshot or HEAD for parent spec
} BOOST_SCOPE_EXIT_END;
{
- RWLock::RLocker owner_locker(dst_image_ctx->owner_lock);
+ std::shared_lock owner_locker{dst_image_ctx->owner_lock};
r = dst_image_ctx->operations->prepare_image_update(true);
if (r < 0) {
lderr(m_cct) << "cannot obtain exclusive lock" << dendl;
// Also collect the list of the children currently attached to the
// source, so we can make a proper decision later about relinking.
- RWLock::RLocker src_image_locker(to_image_ctx->image_lock);
+ std::shared_lock src_image_locker{to_image_ctx->image_lock};
cls::rbd::ParentImageSpec src_parent_spec{to_image_ctx->md_ctx.get_id(),
to_image_ctx->md_ctx.get_namespace(),
to_image_ctx->id, snap.id};
return r;
}
- RWLock::RLocker image_locker(from_image_ctx->image_lock);
+ std::shared_lock image_locker{from_image_ctx->image_lock};
snap.id = from_image_ctx->get_snap_id(cls::rbd::UserSnapshotNamespace(),
snap.name);
if (snap.id == CEPH_NOSNAP) {
std::vector<librbd::linked_image_spec_t> child_images;
{
- RWLock::RLocker image_locker(from_image_ctx->image_lock);
+ std::shared_lock image_locker{from_image_ctx->image_lock};
cls::rbd::ParentImageSpec parent_spec{from_image_ctx->md_ctx.get_id(),
from_image_ctx->md_ctx.get_namespace(),
from_image_ctx->id, snap.id};
librados::snap_t to_snap_id;
{
- RWLock::RLocker image_locker(to_image_ctx->image_lock);
+ std::shared_lock image_locker{to_image_ctx->image_lock};
to_snap_id = to_image_ctx->get_snap_id(cls::rbd::UserSnapshotNamespace(),
from_snap.name);
if (to_snap_id == CEPH_NOSNAP) {
cls::rbd::ParentImageSpec parent_spec;
uint64_t parent_overlap;
{
- RWLock::RLocker image_locker(child_image_ctx->image_lock);
+ std::shared_lock image_locker{child_image_ctx->image_lock};
// use oldest snapshot or HEAD for parent spec
if (!child_image_ctx->snap_info.empty()) {
// is mirroring not enabled for the parent?
{
- RWLock::RLocker image_locker(ictx->image_lock);
+ std::shared_lock image_locker{ictx->image_lock};
ImageCtx *parent = ictx->parent;
if (parent) {
if (relax_same_pool_parent_check &&
};
{
- RWLock::RLocker l(ictx->image_lock);
+ std::shared_lock l{ictx->image_lock};
map<librados::snap_t, SnapInfo> snap_info = ictx->snap_info;
for (auto &info : snap_info) {
cls::rbd::ParentImageSpec parent_spec{ictx->md_ctx.get_id(),
#include "include/rados/librados.hpp"
#include "common/dout.h"
#include "common/errno.h"
+#include "common/Cond.h"
#include "common/Throttle.h"
#include "cls/rbd/cls_rbd_client.h"
#include "osd/osd_types.h"
return r;
}
- RWLock::RLocker image_locker(ictx->image_lock);
+ std::shared_lock image_locker{ictx->image_lock};
auto snap_info = ictx->get_snap_info(snap_id);
if (snap_info == nullptr) {
return -ENOENT;
return r;
}
- RWLock::RLocker image_locker(ictx->image_lock);
+ std::shared_lock image_locker{ictx->image_lock};
auto snap_info = ictx->get_snap_info(snap_id);
if (snap_info == nullptr) {
return -ENOENT;
return r;
}
- RWLock::RLocker l(ictx->image_lock);
+ std::shared_lock l{ictx->image_lock};
auto snap_info = ictx->get_snap_info(snap_id);
if (snap_info == nullptr) {
return -ENOENT;
cls::rbd::SnapshotNamespace snapshot_namespace;
std::string snapshot_name;
{
- RWLock::RLocker image_locker(ictx->image_lock);
+ std::shared_lock image_locker{ictx->image_lock};
auto it = ictx->snap_info.find(snap_id);
if (it == ictx->snap_info.end()) {
return -ENOENT;
if (r == 0) {
if (ictx->test_features(RBD_FEATURE_JOURNALING)) {
- RWLock::WLocker image_locker(ictx->image_lock);
+ std::unique_lock image_locker{ictx->image_lock};
ictx->set_journal_policy(new journal::DisabledPolicy());
}
- ictx->owner_lock.get_read();
+ ictx->owner_lock.lock_shared();
if (ictx->exclusive_lock != nullptr) {
ictx->exclusive_lock->block_requests(0);
r = ictx->operations->prepare_image_update(false);
if (r < 0) {
lderr(cct) << "cannot obtain exclusive lock - not removing" << dendl;
- ictx->owner_lock.put_read();
+ ictx->owner_lock.unlock_shared();
ictx->state->close();
return -EBUSY;
}
}
- ictx->owner_lock.put_read();
+ ictx->owner_lock.unlock_shared();
- ictx->image_lock.get_read();
+ ictx->image_lock.lock_shared();
if (!ictx->migration_info.empty()) {
lderr(cct) << "cannot move migrating image to trash" << dendl;
- ictx->image_lock.put_read();
+ ictx->image_lock.unlock_shared();
ictx->state->close();
return -EBUSY;
}
- ictx->image_lock.put_read();
+ ictx->image_lock.unlock_shared();
r = disable_mirroring<I>(ictx);
if (r < 0) {
}
void finish(int r) override {
- ceph_assert(dispatcher->m_cache_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(dispatcher->m_cache_lock));
auto cct = dispatcher->m_image_ctx->cct;
if (r == -EBLACKLISTED) {
I* image_ctx, size_t max_dirty, bool writethrough_until_flush)
: m_image_ctx(image_ctx), m_max_dirty(max_dirty),
m_writethrough_until_flush(writethrough_until_flush),
- m_cache_lock(util::unique_lock_name(
- "librbd::cache::ObjectCacherObjectDispatch::cache_lock", this)) {
+ m_cache_lock(ceph::make_mutex(util::unique_lock_name(
+ "librbd::cache::ObjectCacherObjectDispatch::cache_lock", this))) {
}
template <typename I>
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
- m_cache_lock.Lock();
+ m_cache_lock.lock();
ldout(cct, 5) << "enabling caching..." << dendl;
m_writeback_handler = new ObjectCacherWriteback(m_image_ctx, m_cache_lock);
m_object_set = new ObjectCacher::ObjectSet(nullptr,
m_image_ctx->data_ctx.get_id(), 0);
m_object_cacher->start();
- m_cache_lock.Unlock();
+ m_cache_lock.unlock();
// add ourself to the IO object dispatcher chain
if (m_max_dirty > 0) {
on_finish = new C_InvalidateCache(this, true, on_finish);
// flush all pending writeback state
- m_cache_lock.Lock();
+ std::lock_guard locker{m_cache_lock};
m_object_cacher->release_set(m_object_set);
m_object_cacher->flush_set(m_object_set, on_finish);
- m_cache_lock.Unlock();
}
template <typename I>
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
- m_image_ctx->image_lock.get_read();
+ m_image_ctx->image_lock.lock_shared();
auto rd = m_object_cacher->prepare_read(snap_id, read_data, op_flags);
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
ObjectExtent extent(data_object_name(m_image_ctx, object_no), object_no,
object_off, object_len, 0);
ZTracer::Trace trace(parent_trace);
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
- m_cache_lock.Lock();
+ m_cache_lock.lock();
int r = m_object_cacher->readx(rd, m_object_set, on_dispatched, &trace);
- m_cache_lock.Unlock();
+ m_cache_lock.unlock();
if (r != 0) {
on_dispatched->complete(r);
}
auto ctx = *on_finish;
*on_finish = new FunctionContext(
[this, object_extents, ctx](int r) {
- m_cache_lock.Lock();
+ m_cache_lock.lock();
m_object_cacher->discard_set(m_object_set, object_extents);
- m_cache_lock.Unlock();
+ m_cache_lock.unlock();
ctx->complete(r);
});
// ensure any in-flight writeback is complete before advancing
// the discard request
- m_cache_lock.Lock();
+ std::lock_guard locker{m_cache_lock};
m_object_cacher->discard_writeback(m_object_set, object_extents,
on_dispatched);
- m_cache_lock.Unlock();
return true;
}
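Most Lock()/Unlock() pairs in this file collapse into a scoped std::lock_guard, but the readx() path above keeps explicit lock()/unlock() calls because the lock must be dropped before a completion callback can be invoked. A minimal sketch of both shapes, with std::mutex standing in for ceph::mutex and a hypothetical callback parameter:

#include <mutex>

std::mutex cache_lock;

// Shape 1: the critical section spans the whole scope, so a guard fits.
void flush_all() {
  std::lock_guard locker{cache_lock};
  // ... release_set() / flush_set() style work ...
}                                     // unlocked automatically here

// Shape 2: the lock must be released before invoking a callback that
// could re-enter this code, so explicit lock()/unlock() is kept.
void read_then_complete(void (*complete)(int)) {
  cache_lock.lock();
  int r = 0;                          // e.g. the readx() result
  cache_lock.unlock();                // drop the lock first
  if (r != 0) {
    complete(r);                      // safe: no lock held
  }
}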
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
- m_image_ctx->image_lock.get_read();
+ m_image_ctx->image_lock.lock_shared();
ObjectCacher::OSDWrite *wr = m_object_cacher->prepare_write(
snapc, data, ceph::real_time::min(), op_flags, *journal_tid);
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
ObjectExtent extent(data_object_name(m_image_ctx, object_no),
object_no, object_off, data.length(), 0);
ZTracer::Trace trace(parent_trace);
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
- m_cache_lock.Lock();
+ std::lock_guard locker{m_cache_lock};
m_object_cacher->writex(wr, m_object_set, on_dispatched, &trace);
- m_cache_lock.Unlock();
return true;
}
object_extents.emplace_back(data_object_name(m_image_ctx, object_no),
object_no, object_off, cmp_data.length(), 0);
- Mutex::Locker cache_locker(m_cache_lock);
+ std::lock_guard cache_locker{m_cache_lock};
m_object_cacher->flush_set(m_object_set, object_extents, &trace,
on_dispatched);
return true;
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
- m_cache_lock.Lock();
+ std::lock_guard locker{m_cache_lock};
if (flush_source == io::FLUSH_SOURCE_USER && !m_user_flushed) {
m_user_flushed = true;
if (m_writethrough_until_flush && m_max_dirty > 0) {
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
m_object_cacher->flush_set(m_object_set, on_dispatched);
- m_cache_lock.Unlock();
return true;
}
// invalidate any remaining cache entries
on_finish = new C_InvalidateCache(this, false, on_finish);
- m_cache_lock.Lock();
+ std::lock_guard locker{m_cache_lock};
m_object_cacher->release_set(m_object_set);
m_object_cacher->flush_set(m_object_set, on_finish);
- m_cache_lock.Unlock();
return true;
}
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
- m_cache_lock.Lock();
+ std::lock_guard locker{m_cache_lock};
m_object_cacher->clear_nonexistence(m_object_set);
- m_cache_lock.Unlock();
-
return false;
}
#define CEPH_LIBRBD_CACHE_OBJECT_CACHER_OBJECT_DISPATCH_H
#include "librbd/io/ObjectDispatchInterface.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "osdc/ObjectCacher.h"
struct WritebackHandler;
size_t m_max_dirty;
bool m_writethrough_until_flush;
- Mutex m_cache_lock;
+ ceph::mutex m_cache_lock;
ObjectCacher *m_object_cacher = nullptr;
ObjectCacher::ObjectSet *m_object_set = nullptr;
#include "librbd/cache/ObjectCacherWriteback.h"
#include "common/ceph_context.h"
#include "common/dout.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "common/WorkQueue.h"
#include "osdc/Striper.h"
#include "include/Context.h"
*/
class C_ReadRequest : public Context {
public:
- C_ReadRequest(CephContext *cct, Context *c, Mutex *cache_lock)
+ C_ReadRequest(CephContext *cct, Context *c, ceph::mutex *cache_lock)
: m_cct(cct), m_ctx(c), m_cache_lock(cache_lock) {
}
void finish(int r) override {
ldout(m_cct, 20) << "aio_cb completing " << dendl;
{
- Mutex::Locker cache_locker(*m_cache_lock);
+ std::lock_guard cache_locker{*m_cache_lock};
m_ctx->complete(r);
}
ldout(m_cct, 20) << "aio_cb finished" << dendl;
private:
CephContext *m_cct;
Context *m_ctx;
- Mutex *m_cache_lock;
+ ceph::mutex *m_cache_lock;
};
class C_OrderedWrite : public Context {
void finish(int r) override {
ldout(m_cct, 20) << "C_OrderedWrite completing " << m_result << dendl;
{
- Mutex::Locker l(m_wb_handler->m_lock);
+ std::lock_guard l{m_wb_handler->m_lock};
ceph_assert(!m_result->done);
m_result->done = true;
m_result->ret = r;
}
};
-ObjectCacherWriteback::ObjectCacherWriteback(ImageCtx *ictx, Mutex& lock)
+ObjectCacherWriteback::ObjectCacherWriteback(ImageCtx *ictx, ceph::mutex& lock)
: m_tid(0), m_lock(lock), m_ictx(ictx) {
}
uint64_t read_len,
snapid_t snapid)
{
- m_ictx->image_lock.get_read();
+ m_ictx->image_lock.lock_shared();
librados::snap_t snap_id = m_ictx->snap_id;
uint64_t overlap = 0;
m_ictx->get_parent_overlap(snap_id, &overlap);
- m_ictx->image_lock.put_read();
+ m_ictx->image_lock.unlock_shared();
uint64_t object_no = oid_to_object_no(oid.name, m_ictx->object_prefix);
void ObjectCacherWriteback::complete_writes(const std::string& oid)
{
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
std::queue<write_result_d*>& results = m_writes[oid];
ldout(m_ictx->cct, 20) << "complete_writes() oid " << oid << dendl;
std::list<write_result_d*> finished;
#include "osdc/WritebackHandler.h"
#include <queue>
+#include "common/ceph_mutex.h"
-class Mutex;
class Context;
namespace librbd {
class ObjectCacherWriteback : public WritebackHandler {
public:
- ObjectCacherWriteback(ImageCtx *ictx, Mutex& lock);
+ ObjectCacherWriteback(ImageCtx *ictx, ceph::mutex& lock);
// Note that oloc, trunc_size, and trunc_seq are ignored
void read(const object_t& oid, uint64_t object_no,
void complete_writes(const std::string& oid);
ceph_tid_t m_tid;
- Mutex& m_lock;
+ ceph::mutex& m_lock;
librbd::ImageCtx *m_ictx;
ceph::unordered_map<std::string, std::queue<write_result_d*> > m_writes;
friend class C_OrderedWrite;
#ifndef CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H
-#include "common/Mutex.h"
#include "librbd/io/ObjectDispatchInterface.h"
#include "tools/immutable_object_cache/CacheClient.h"
#include "librbd/cache/TypeTraits.h"
WriteAroundObjectDispatch<I>::WriteAroundObjectDispatch(
I* image_ctx, size_t max_dirty, bool writethrough_until_flush)
: m_image_ctx(image_ctx), m_init_max_dirty(max_dirty), m_max_dirty(max_dirty),
- m_lock(util::unique_lock_name(
- "librbd::cache::WriteAroundObjectDispatch::lock", this)) {
+ m_lock(ceph::make_mutex(util::unique_lock_name(
+ "librbd::cache::WriteAroundObjectDispatch::lock", this))) {
if (writethrough_until_flush) {
m_max_dirty = 0;
}
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (flush_source == io::FLUSH_SOURCE_USER && !m_user_flushed) {
m_user_flushed = true;
if (m_max_dirty == 0 && m_init_max_dirty > 0) {
io::DispatchResult* dispatch_result, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
- m_lock.Lock();
+ m_lock.lock();
auto in_flight_extents_it = m_in_flight_extents.find(object_no);
if (in_flight_extents_it == m_in_flight_extents.end() ||
!in_flight_extents_it->second.intersects(object_off, object_len)) {
// no IO in-flight to the specified extent
- m_lock.Unlock();
+ m_lock.unlock();
return false;
}
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
m_blocked_unoptimized_ios[object_no].emplace(
tid, BlockedIO{object_off, object_len, nullptr, on_dispatched});
- m_lock.Unlock();
+ m_lock.unlock();
return true;
}
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
- m_lock.Lock();
+ m_lock.lock();
if (m_max_dirty == 0) {
// write-through mode is active -- no-op the cache
- m_lock.Unlock();
+ m_lock.unlock();
return false;
}
if ((op_flags & LIBRADOS_OP_FLAG_FADVISE_FUA) != 0) {
// force unit access flag is set -- disable write-around
- m_lock.Unlock();
+ m_lock.unlock();
return dispatch_unoptimized_io(object_no, object_off, object_len,
dispatch_result, on_dispatched);
}
m_queued_or_blocked_io_tids.insert(tid);
m_blocked_ios[object_no].emplace(tid, BlockedIO{object_off, object_len, ctx,
on_dispatched});
- m_lock.Unlock();
+ m_lock.unlock();
} else if (can_dispatch_io(tid, object_len)) {
- m_lock.Unlock();
+ m_lock.unlock();
ldout(cct, 20) << "dispatching: tid=" << tid << dendl;
on_dispatched->complete(0);
ldout(cct, 20) << "queueing: tid=" << tid << dendl;
m_queued_or_blocked_io_tids.insert(tid);
m_queued_ios.emplace(tid, QueuedIO{object_len, ctx, on_dispatched});
- m_lock.Unlock();
+ m_lock.unlock();
}
return true;
}
uint64_t object_no, uint64_t object_off, uint64_t object_len,
Contexts* unoptimized_io_dispatches) {
auto cct = m_image_ctx->cct;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
auto in_flight_extents_it = m_in_flight_extents.find(object_no);
ceph_assert(in_flight_extents_it != m_in_flight_extents.end());
template <typename I>
bool WriteAroundObjectDispatch<I>::can_dispatch_io(
uint64_t tid, uint64_t length) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_in_flight_bytes == 0 || m_in_flight_bytes + length <= m_max_dirty) {
// no in-flight IO or still under max write-around in-flight limit.
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << ", tid=" << tid << dendl;
- m_lock.Lock();
+ m_lock.lock();
m_in_flight_io_tids.erase(tid);
ceph_assert(m_in_flight_bytes >= object_len);
m_in_flight_bytes -= object_len;
// collect any queued flushes that were tied to queued IOs
auto ready_flushes = collect_ready_flushes();
- m_lock.Unlock();
+ m_lock.unlock();
// dispatch any ready unoptimized IOs
for (auto& it : unoptimized_io_dispatches) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << ", tid=" << tid << dendl;
- m_lock.Lock();
+ m_lock.lock();
// move the in-flight flush to the pending completion list
auto it = m_in_flight_flushes.find(tid);
if (!finished_flushes.empty()) {
std::swap(pending_flush_error, m_pending_flush_error);
}
- m_lock.Unlock();
+ m_lock.unlock();
// complete flushes that were waiting on in-flight IO
// (and propagate any IO errors)
template <typename I>
typename WriteAroundObjectDispatch<I>::QueuedIOs
WriteAroundObjectDispatch<I>::collect_ready_ios() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
QueuedIOs queued_ios;
template <typename I>
typename WriteAroundObjectDispatch<I>::Contexts
WriteAroundObjectDispatch<I>::collect_ready_flushes() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Contexts ready_flushes;
auto io_tid_it = m_queued_or_blocked_io_tids.begin();
template <typename I>
typename WriteAroundObjectDispatch<I>::Contexts
WriteAroundObjectDispatch<I>::collect_finished_flushes() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Contexts finished_flushes;
auto io_tid_it = m_in_flight_io_tids.begin();
#include "librbd/io/ObjectDispatchInterface.h"
#include "include/interval_set.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "librbd/io/Types.h"
#include <map>
#include <set>
size_t m_init_max_dirty;
size_t m_max_dirty;
- Mutex m_lock;
+ ceph::mutex m_lock;
bool m_user_flushed = false;
uint64_t m_last_tid = 0;
m_snap_id_end(snap_id_end), m_flatten(flatten),
m_object_number(object_number), m_snap_seqs(snap_seqs),
m_prog_ctx(prog_ctx), m_on_finish(on_finish), m_cct(dst_image_ctx->cct),
- m_lock(unique_lock_name("ImageCopyRequest::m_lock", this)) {
+ m_lock(ceph::make_mutex(unique_lock_name("ImageCopyRequest::m_lock", this))) {
}
template <typename I>
template <typename I>
void ImageCopyRequest<I>::cancel() {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ldout(m_cct, 20) << dendl;
m_canceled = true;
uint64_t size;
{
- RWLock::RLocker image_locker(m_src_image_ctx->image_lock);
+ std::shared_lock image_locker{m_src_image_ctx->image_lock};
size = m_src_image_ctx->get_image_size(CEPH_NOSNAP);
for (auto snap_id : m_src_image_ctx->snaps) {
size = std::max(size, m_src_image_ctx->get_image_size(snap_id));
bool complete;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
for (uint64_t i = 0;
i < m_src_image_ctx->config.template get_val<uint64_t>("rbd_concurrent_management_ops");
++i) {
template <typename I>
void ImageCopyRequest<I>::send_next_object_copy() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_canceled && m_ret_val == 0) {
ldout(m_cct, 10) << "image copy canceled" << dendl;
bool complete;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_current_ops > 0);
--m_current_ops;
m_copied_objects.pop();
uint64_t progress_object_no = *m_object_number + 1;
m_updating_progress = true;
- m_lock.Unlock();
+ m_lock.unlock();
m_prog_ctx->update_progress(progress_object_no, m_end_object_no);
- m_lock.Lock();
+ m_lock.lock();
ceph_assert(m_updating_progress);
m_updating_progress = false;
}
#include "include/int_types.h"
#include "include/rados/librados.hpp"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "common/RefCountedObj.h"
#include "librbd/Types.h"
#include "librbd/deep_copy/Types.h"
Context *m_on_finish;
CephContext *m_cct;
- Mutex m_lock;
+ ceph::mutex m_lock;
bool m_canceled = false;
uint64_t m_object_no = 0;
template <typename I>
void ObjectCopyRequest<I>::send_read_from_parent() {
- m_src_image_ctx->image_lock.get_read();
+ m_src_image_ctx->image_lock.lock_shared();
io::Extents image_extents;
compute_read_from_parent_ops(&image_extents);
- m_src_image_ctx->image_lock.put_read();
+ m_src_image_ctx->image_lock.unlock_shared();
if (image_extents.empty()) {
handle_read_from_parent(0);
int r;
Context *finish_op_ctx;
{
- RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
finish_op_ctx = start_lock_op(m_dst_image_ctx->owner_lock, &r);
}
if (finish_op_ctx == nullptr) {
return;
}
- m_dst_image_ctx->owner_lock.get_read();
- m_dst_image_ctx->image_lock.get_read();
+ m_dst_image_ctx->owner_lock.lock_shared();
+ m_dst_image_ctx->image_lock.lock_shared();
if (m_dst_image_ctx->object_map == nullptr) {
// possible that exclusive lock was lost in background
lderr(m_cct) << "object map is not initialized" << dendl;
- m_dst_image_ctx->image_lock.put_read();
- m_dst_image_ctx->owner_lock.put_read();
+ m_dst_image_ctx->image_lock.unlock_shared();
+ m_dst_image_ctx->owner_lock.unlock_shared();
finish(-EINVAL);
return;
}
auto finish_op_ctx = start_lock_op(m_dst_image_ctx->owner_lock, &r);
if (finish_op_ctx == nullptr) {
lderr(m_cct) << "lost exclusive lock" << dendl;
- m_dst_image_ctx->image_lock.put_read();
- m_dst_image_ctx->owner_lock.put_read();
+ m_dst_image_ctx->image_lock.unlock_shared();
+ m_dst_image_ctx->owner_lock.unlock_shared();
finish(r);
return;
}
{}, {}, false, ctx);
// NOTE: state machine might complete before we reach here
- dst_image_ctx->image_lock.put_read();
- dst_image_ctx->owner_lock.put_read();
+ dst_image_ctx->image_lock.unlock_shared();
+ dst_image_ctx->owner_lock.unlock_shared();
if (!sent) {
ceph_assert(dst_snap_id == CEPH_NOSNAP);
ctx->complete(0);
}
template <typename I>
-Context *ObjectCopyRequest<I>::start_lock_op(RWLock &owner_lock, int* r) {
- ceph_assert(m_dst_image_ctx->owner_lock.is_locked());
+Context *ObjectCopyRequest<I>::start_lock_op(ceph::shared_mutex &owner_lock,
+ int* r) {
+ ceph_assert(ceph_mutex_is_locked(m_dst_image_ctx->owner_lock));
if (m_dst_image_ctx->exclusive_lock == nullptr) {
return new FunctionContext([](int r) {});
}
m_read_snaps = {};
m_zero_interval = {};
- m_src_image_ctx->image_lock.get_read();
+ m_src_image_ctx->image_lock.lock_shared();
bool hide_parent = (m_src_image_ctx->parent != nullptr);
- m_src_image_ctx->image_lock.put_read();
+ m_src_image_ctx->image_lock.unlock_shared();
librados::snap_t src_copy_point_snap_id = m_snap_map.rbegin()->first;
bool prev_exists = hide_parent;
template <typename I>
void ObjectCopyRequest<I>::compute_read_from_parent_ops(
io::Extents *parent_image_extents) {
- assert(m_src_image_ctx->image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_src_image_ctx->image_lock));
m_read_ops = {};
m_zero_interval = {};
bool fast_diff = m_dst_image_ctx->test_features(RBD_FEATURE_FAST_DIFF);
uint64_t prev_end_size = 0;
- m_src_image_ctx->image_lock.get_read();
+ m_src_image_ctx->image_lock.lock_shared();
bool hide_parent = (m_src_image_ctx->parent != nullptr);
- m_src_image_ctx->image_lock.put_read();
+ m_src_image_ctx->image_lock.unlock_shared();
for (auto &it : m_dst_zero_interval) {
auto src_snap_seq = it.first;
}
if (hide_parent) {
- RWLock::RLocker image_locker(m_dst_image_ctx->image_lock);
+ std::shared_lock image_locker{m_dst_image_ctx->image_lock};
uint64_t parent_overlap = 0;
int r = m_dst_image_ctx->get_parent_overlap(dst_snap_seq,
&parent_overlap);
template <typename I>
void ObjectCopyRequest<I>::compute_dst_object_may_exist() {
- RWLock::RLocker image_locker(m_dst_image_ctx->image_lock);
+ std::shared_lock image_locker{m_dst_image_ctx->image_lock};
auto snap_ids = m_dst_image_ctx->snaps;
snap_ids.push_back(CEPH_NOSNAP);
void send_update_object_map();
void handle_update_object_map(int r);
- Context *start_lock_op(RWLock &owner_lock, int* r);
+ Context *start_lock_op(ceph::shared_mutex &owner_lock, int* r);
uint64_t src_to_dst_object_offset(uint64_t objectno, uint64_t offset);
template <typename I>
void SetHeadRequest<I>::send_set_size() {
- m_image_ctx->image_lock.get_read();
+ m_image_ctx->image_lock.lock_shared();
if (m_image_ctx->size == m_size) {
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
send_detach_parent();
return;
}
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
ldout(m_cct, 20) << dendl;
{
// adjust in-memory image size now that it's updated on disk
- RWLock::WLocker image_locker(m_image_ctx->image_lock);
+ std::unique_lock image_locker{m_image_ctx->image_lock};
if (m_image_ctx->size > m_size) {
if (m_image_ctx->parent_md.spec.pool_id != -1 &&
m_image_ctx->parent_md.overlap > m_size) {
template <typename I>
void SetHeadRequest<I>::send_detach_parent() {
- m_image_ctx->image_lock.get_read();
+ m_image_ctx->image_lock.lock_shared();
if (m_image_ctx->parent_md.spec.pool_id == -1 ||
(m_image_ctx->parent_md.spec == m_parent_spec &&
m_image_ctx->parent_md.overlap == m_parent_overlap)) {
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
send_attach_parent();
return;
}
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
ldout(m_cct, 20) << dendl;
{
// adjust in-memory parent now that it's updated on disk
- RWLock::WLocker image_locker(m_image_ctx->image_lock);
+ std::unique_lock image_locker{m_image_ctx->image_lock};
m_image_ctx->parent_md.spec = {};
m_image_ctx->parent_md.overlap = 0;
}
template <typename I>
void SetHeadRequest<I>::send_attach_parent() {
- m_image_ctx->image_lock.get_read();
+ m_image_ctx->image_lock.lock_shared();
if (m_image_ctx->parent_md.spec == m_parent_spec &&
m_image_ctx->parent_md.overlap == m_parent_overlap) {
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
finish(0);
return;
}
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
ldout(m_cct, 20) << dendl;
{
// adjust in-memory parent now that it's updated on disk
- RWLock::WLocker image_locker(m_image_ctx->image_lock);
+ std::unique_lock image_locker{m_image_ctx->image_lock};
m_image_ctx->parent_md.spec = m_parent_spec;
m_image_ctx->parent_md.overlap = m_parent_overlap;
}
template <typename I>
Context *SetHeadRequest<I>::start_lock_op(int* r) {
- RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_image_ctx->owner_lock};
if (m_image_ctx->exclusive_lock == nullptr) {
return new FunctionContext([](int r) {});
}
m_dst_image_ctx(dst_image_ctx), m_snap_id_end(snap_id_end),
m_flatten(flatten), m_work_queue(work_queue), m_snap_seqs_result(snap_seqs),
m_snap_seqs(*snap_seqs), m_on_finish(on_finish), m_cct(dst_image_ctx->cct),
- m_lock(unique_lock_name("SnapshotCopyRequest::m_lock", this)) {
+ m_lock(ceph::make_mutex(unique_lock_name("SnapshotCopyRequest::m_lock", this))) {
// snap ids ordered from oldest to newest
m_src_snap_ids.insert(src_image_ctx->snaps.begin(),
src_image_ctx->snaps.end());
template <typename I>
void SnapshotCopyRequest<I>::cancel() {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ldout(m_cct, 20) << dendl;
m_canceled = true;
for (; snap_id_it != m_dst_snap_ids.end(); ++snap_id_it) {
librados::snap_t dst_snap_id = *snap_id_it;
- m_dst_image_ctx->image_lock.get_read();
+ m_dst_image_ctx->image_lock.lock_shared();
bool dst_unprotected;
int r = m_dst_image_ctx->is_snap_unprotected(dst_snap_id, &dst_unprotected);
if (r < 0) {
lderr(m_cct) << "failed to retrieve destination snap unprotect status: "
<< cpp_strerror(r) << dendl;
- m_dst_image_ctx->image_lock.put_read();
+ m_dst_image_ctx->image_lock.unlock_shared();
finish(r);
return;
}
- m_dst_image_ctx->image_lock.put_read();
+ m_dst_image_ctx->image_lock.unlock_shared();
if (dst_unprotected) {
// snap is already unprotected -- check next snap
});
if (snap_seq_it != m_snap_seqs.end()) {
- m_src_image_ctx->image_lock.get_read();
+ m_src_image_ctx->image_lock.lock_shared();
bool src_unprotected;
r = m_src_image_ctx->is_snap_unprotected(snap_seq_it->first,
&src_unprotected);
if (r < 0) {
lderr(m_cct) << "failed to retrieve source snap unprotect status: "
<< cpp_strerror(r) << dendl;
- m_src_image_ctx->image_lock.put_read();
+ m_src_image_ctx->image_lock.unlock_shared();
finish(r);
return;
}
- m_src_image_ctx->image_lock.put_read();
+ m_src_image_ctx->image_lock.unlock_shared();
if (src_unprotected) {
// source is unprotected -- unprotect destination snap
handle_snap_unprotect(r);
finish_op_ctx->complete(0);
});
- RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
m_dst_image_ctx->operations->execute_snap_unprotect(
cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx);
}
{
// avoid the need to refresh to delete the newly unprotected snapshot
- RWLock::RLocker image_locker(m_dst_image_ctx->image_lock);
+ std::shared_lock image_locker{m_dst_image_ctx->image_lock};
auto snap_info_it = m_dst_image_ctx->snap_info.find(m_prev_snap_id);
if (snap_info_it != m_dst_image_ctx->snap_info.end()) {
snap_info_it->second.protection_status =
librados::snap_t dst_snap_id = *snap_id_it;
cls::rbd::SnapshotNamespace snap_namespace;
- m_dst_image_ctx->image_lock.get_read();
+ m_dst_image_ctx->image_lock.lock_shared();
int r = m_dst_image_ctx->get_snap_namespace(dst_snap_id, &snap_namespace);
- m_dst_image_ctx->image_lock.put_read();
+ m_dst_image_ctx->image_lock.unlock_shared();
if (r < 0) {
lderr(m_cct) << "failed to retrieve destination snap namespace: "
<< m_snap_name << dendl;
handle_snap_remove(r);
finish_op_ctx->complete(0);
});
- RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
m_dst_image_ctx->operations->execute_snap_remove(
cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx);
}
librados::snap_t src_snap_id = *snap_id_it;
cls::rbd::SnapshotNamespace snap_namespace;
- m_src_image_ctx->image_lock.get_read();
+ m_src_image_ctx->image_lock.lock_shared();
int r = m_src_image_ctx->get_snap_namespace(src_snap_id, &snap_namespace);
- m_src_image_ctx->image_lock.put_read();
+ m_src_image_ctx->image_lock.unlock_shared();
if (r < 0) {
lderr(m_cct) << "failed to retrieve source snap namespace: "
<< m_snap_name << dendl;
m_prev_snap_id = *snap_id_it;
m_snap_name = get_snapshot_name(m_src_image_ctx, m_prev_snap_id);
- m_src_image_ctx->image_lock.get_read();
+ m_src_image_ctx->image_lock.lock_shared();
auto snap_info_it = m_src_image_ctx->snap_info.find(m_prev_snap_id);
if (snap_info_it == m_src_image_ctx->snap_info.end()) {
- m_src_image_ctx->image_lock.put_read();
+ m_src_image_ctx->image_lock.unlock_shared();
lderr(m_cct) << "failed to retrieve source snap info: " << m_snap_name
<< dendl;
finish(-ENOENT);
parent_spec = m_dst_parent_spec;
parent_overlap = snap_info_it->second.parent.overlap;
}
- m_src_image_ctx->image_lock.put_read();
+ m_src_image_ctx->image_lock.unlock_shared();
ldout(m_cct, 20) << "snap_name=" << m_snap_name << ", "
<< "snap_id=" << m_prev_snap_id << ", "
for (; snap_id_it != m_src_snap_ids.end(); ++snap_id_it) {
librados::snap_t src_snap_id = *snap_id_it;
- m_src_image_ctx->image_lock.get_read();
+ m_src_image_ctx->image_lock.lock_shared();
bool src_protected;
int r = m_src_image_ctx->is_snap_protected(src_snap_id, &src_protected);
if (r < 0) {
lderr(m_cct) << "failed to retrieve source snap protect status: "
<< cpp_strerror(r) << dendl;
- m_src_image_ctx->image_lock.put_read();
+ m_src_image_ctx->image_lock.unlock_shared();
finish(r);
return;
}
- m_src_image_ctx->image_lock.put_read();
+ m_src_image_ctx->image_lock.unlock_shared();
if (!src_protected) {
// snap is not protected -- check next snap
auto snap_seq_it = m_snap_seqs.find(src_snap_id);
ceph_assert(snap_seq_it != m_snap_seqs.end());
- m_dst_image_ctx->image_lock.get_read();
+ m_dst_image_ctx->image_lock.lock_shared();
bool dst_protected;
r = m_dst_image_ctx->is_snap_protected(snap_seq_it->second, &dst_protected);
if (r < 0) {
lderr(m_cct) << "failed to retrieve destination snap protect status: "
<< cpp_strerror(r) << dendl;
- m_dst_image_ctx->image_lock.put_read();
+ m_dst_image_ctx->image_lock.unlock_shared();
finish(r);
return;
}
- m_dst_image_ctx->image_lock.put_read();
+ m_dst_image_ctx->image_lock.unlock_shared();
if (!dst_protected) {
break;
handle_snap_protect(r);
finish_op_ctx->complete(0);
});
- RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
m_dst_image_ctx->operations->execute_snap_protect(
cls::rbd::UserSnapshotNamespace(), m_snap_name.c_str(), ctx);
}
cls::rbd::ParentImageSpec parent_spec;
uint64_t parent_overlap = 0;
{
- RWLock::RLocker src_locker(m_src_image_ctx->image_lock);
+ std::shared_lock src_locker{m_src_image_ctx->image_lock};
size = m_src_image_ctx->size;
if (!m_flatten) {
parent_spec = m_src_image_ctx->parent_md.spec;
if (m_snap_id_end == CEPH_NOSNAP &&
m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP)) {
- RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
- RWLock::RLocker image_locker(m_dst_image_ctx->image_lock);
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
+ std::shared_lock image_locker{m_dst_image_ctx->image_lock};
if (m_dst_image_ctx->object_map != nullptr &&
Striper::get_num_objects(m_dst_image_ctx->layout,
template <typename I>
bool SnapshotCopyRequest<I>::handle_cancellation() {
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (!m_canceled) {
return false;
}
template <typename I>
int SnapshotCopyRequest<I>::validate_parent(I *image_ctx,
cls::rbd::ParentImageSpec *spec) {
- RWLock::RLocker owner_locker(image_ctx->owner_lock);
- RWLock::RLocker image_locker(image_ctx->image_lock);
+ std::shared_lock owner_locker{image_ctx->owner_lock};
+ std::shared_lock image_locker{image_ctx->image_lock};
// ensure source image's parent specs are still consistent
*spec = image_ctx->parent_md.spec;
template <typename I>
Context *SnapshotCopyRequest<I>::start_lock_op(int* r) {
- RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
return start_lock_op(m_dst_image_ctx->owner_lock, r);
}
template <typename I>
-Context *SnapshotCopyRequest<I>::start_lock_op(RWLock &owner_lock, int* r) {
- ceph_assert(m_dst_image_ctx->owner_lock.is_locked());
+Context *SnapshotCopyRequest<I>::start_lock_op(ceph::shared_mutex &owner_lock, int* r) {
+ ceph_assert(ceph_mutex_is_locked(m_dst_image_ctx->owner_lock));
if (m_dst_image_ctx->exclusive_lock == nullptr) {
return new FunctionContext([](int r) {});
}
cls::rbd::ParentImageSpec m_dst_parent_spec;
- Mutex m_lock;
+ ceph::mutex m_lock;
bool m_canceled = false;
void send_snap_unprotect();
int validate_parent(ImageCtxT *image_ctx, cls::rbd::ParentImageSpec *spec);
Context *start_lock_op(int* r);
- Context *start_lock_op(RWLock &owner_locki, int* r);
+ Context *start_lock_op(ceph::shared_mutex &owner_lock, int* r);
void finish(int r);
};
handle_create_snap(r);
finish_op_ctx->complete(0);
});
- RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
m_dst_image_ctx->operations->execute_snap_create(m_snap_namespace,
m_snap_name.c_str(),
ctx,
return;
}
- m_dst_image_ctx->image_lock.get_read();
+ m_dst_image_ctx->image_lock.lock_shared();
auto snap_it = m_dst_image_ctx->snap_ids.find(
{cls::rbd::UserSnapshotNamespace(), m_snap_name});
if (snap_it == m_dst_image_ctx->snap_ids.end()) {
lderr(m_cct) << "failed to locate snap: " << m_snap_name << dendl;
- m_dst_image_ctx->image_lock.put_read();
+ m_dst_image_ctx->image_lock.unlock_shared();
finish(-ENOENT);
return;
}
librados::snap_t local_snap_id = snap_it->second;
- m_dst_image_ctx->image_lock.put_read();
+ m_dst_image_ctx->image_lock.unlock_shared();
std::string object_map_oid(librbd::ObjectMap<>::object_map_name(
m_dst_image_ctx->id, local_snap_id));
template <typename I>
Context *SnapshotCreateRequest<I>::start_lock_op(int* r) {
- RWLock::RLocker owner_locker(m_dst_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
if (m_dst_image_ctx->exclusive_lock == nullptr) {
return new FunctionContext([](int r) {});
}
namespace exclusive_lock {
int AutomaticPolicy::lock_requested(bool force) {
- ceph_assert(m_image_ctx->owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock));
ceph_assert(m_image_ctx->exclusive_lock != nullptr);
ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force
bool journal_enabled;
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
journal_enabled = (m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
m_image_ctx.image_lock) &&
!m_image_ctx.get_journal_policy()->journal_disabled());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
using klass = PostAcquireRequest<I>;
Context *ctx = create_context_callback<
klass, &klass::handle_allocate_journal_tag>(this);
template <typename I>
void PostAcquireRequest<I>::apply() {
{
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::unique_lock image_locker{m_image_ctx.image_lock};
ceph_assert(m_image_ctx.object_map == nullptr);
m_image_ctx.object_map = m_object_map;
template <typename I>
void PostAcquireRequest<I>::revert() {
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::unique_lock image_locker{m_image_ctx.image_lock};
m_image_ctx.object_map = nullptr;
m_image_ctx.journal = nullptr;
klass, &klass::handle_block_writes>(this);
{
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
// setting the lock as required will automatically cause the IO
// queue to re-request the lock if any IO is queued
if (m_image_ctx.clone_copy_on_read ||
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ std::shared_lock owner_lock{m_image_ctx.owner_lock};
Context *ctx = create_context_callback<
PreReleaseRequest<I>,
&PreReleaseRequest<I>::handle_invalidate_cache>(this);
template <typename I>
void PreReleaseRequest<I>::send_close_journal() {
{
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::unique_lock image_locker{m_image_ctx.image_lock};
std::swap(m_journal, m_image_ctx.journal);
}
template <typename I>
void PreReleaseRequest<I>::send_close_object_map() {
{
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::unique_lock image_locker{m_image_ctx.image_lock};
std::swap(m_object_map, m_image_ctx.object_map);
}
namespace exclusive_lock {
int StandardPolicy::lock_requested(bool force) {
- ceph_assert(m_image_ctx->owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx->owner_lock));
ceph_assert(m_image_ctx->exclusive_lock != nullptr);
ldout(m_image_ctx->cct, 20) << this << " " << __func__ << ": force=" << force
bool snap_protected = false;
if (r == 0) {
- RWLock::RLocker image_locker(m_parent_image_ctx->image_lock);
+ std::shared_lock image_locker{m_parent_image_ctx->image_lock};
r = m_parent_image_ctx->is_snap_protected(m_parent_snap_id,
&snap_protected);
}
return;
}
- m_parent_image_ctx->image_lock.get_read();
+ m_parent_image_ctx->image_lock.lock_shared();
uint64_t p_features = m_parent_image_ctx->features;
m_size = m_parent_image_ctx->get_image_size(m_parent_image_ctx->snap_id);
bool snap_protected;
int r = m_parent_image_ctx->is_snap_protected(m_parent_image_ctx->snap_id, &snap_protected);
- m_parent_image_ctx->image_lock.put_read();
+ m_parent_image_ctx->image_lock.unlock_shared();
if ((p_features & RBD_FEATURE_LAYERING) != RBD_FEATURE_LAYERING) {
lderr(m_cct) << "parent image must support layering" << dendl;
Context *ctx = create_context_callback<
klass, &klass::handle_create_child>(this);
- RWLock::RLocker image_locker(m_parent_image_ctx->image_lock);
+ std::shared_lock image_locker{m_parent_image_ctx->image_lock};
CreateRequest<I> *req = CreateRequest<I>::create(
m_config, m_ioctx, m_name, m_id, m_size, m_opts,
m_non_primary_global_image_id, m_primary_mirror_uuid, true,
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
- RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_image_ctx->owner_lock};
m_image_ctx->io_work_queue->shut_down(create_context_callback<
CloseRequest<I>, &CloseRequest<I>::handle_shut_down_io_queue>(this));
}
template <typename I>
void CloseRequest<I>::send_shut_down_exclusive_lock() {
{
- RWLock::WLocker owner_locker(m_image_ctx->owner_lock);
+ std::unique_lock owner_locker{m_image_ctx->owner_lock};
m_exclusive_lock = m_image_ctx->exclusive_lock;
// if reading a snapshot -- possible object map is open
- RWLock::WLocker image_locker(m_image_ctx->image_lock);
+ std::unique_lock image_locker{m_image_ctx->image_lock};
if (m_exclusive_lock == nullptr) {
delete m_image_ctx->object_map;
m_image_ctx->object_map = nullptr;
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
{
- RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_image_ctx->owner_lock};
ceph_assert(m_image_ctx->exclusive_lock == nullptr);
// object map and journal closed during exclusive lock shutdown
- RWLock::RLocker image_locker(m_image_ctx->image_lock);
+ std::shared_lock image_locker{m_image_ctx->image_lock};
ceph_assert(m_image_ctx->journal == nullptr);
ceph_assert(m_image_ctx->object_map == nullptr);
}
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
- RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_image_ctx->owner_lock};
auto ctx = create_context_callback<
CloseRequest<I>, &CloseRequest<I>::handle_flush>(this);
auto aio_comp = io::AioCompletion::create_and_start(ctx, m_image_ctx,
template <typename I>
void DetachChildRequest<I>::send() {
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
// use oldest snapshot or HEAD for parent spec
if (!m_image_ctx.snap_info.empty()) {
m_watchers->clear();
if (m_object_watchers.size() > 0) {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
uint64_t watch_handle = m_image_ctx.image_watcher != nullptr ?
m_image_ctx.image_watcher->get_watch_handle() : 0;
uint64_t snap_id = CEPH_NOSNAP;
std::swap(m_image_ctx->open_snap_id, snap_id);
if (snap_id == CEPH_NOSNAP) {
- RWLock::RLocker image_locker(m_image_ctx->image_lock);
+ std::shared_lock image_locker{m_image_ctx->image_lock};
snap_id = m_image_ctx->get_snap_id(m_image_ctx->snap_namespace,
m_image_ctx->snap_name);
}
template <typename I>
void PreRemoveRequest<I>::acquire_exclusive_lock() {
- RWLock::RLocker owner_lock(m_image_ctx->owner_lock);
+ std::shared_lock owner_lock{m_image_ctx->owner_lock};
if (m_image_ctx->exclusive_lock == nullptr) {
validate_image_removal();
return;
// do not attempt to open the journal when removing the image in case
// it's corrupt
if (m_image_ctx->test_features(RBD_FEATURE_JOURNALING)) {
- RWLock::WLocker image_locker(m_image_ctx->image_lock);
+ std::unique_lock image_locker{m_image_ctx->image_lock};
m_image_ctx->set_journal_policy(new journal::DisabledPolicy());
}
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
- m_image_ctx->image_lock.get_read();
+ m_image_ctx->image_lock.lock_shared();
for (auto& snap_info : m_image_ctx->snap_info) {
if (auto_delete_snapshot(snap_info.second)) {
m_snap_infos.insert(snap_info);
} else {
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
ldout(cct, 5) << "image has snapshots - not removing" << dendl;
finish(-ENOTEMPTY);
return;
}
}
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
list_image_watchers();
}
ldout(cct, 20) << "snap_id=" << snap_id << ", "
<< "snap_name=" << snap_info.name << dendl;
- RWLock::RLocker owner_lock(m_image_ctx->owner_lock);
+ std::shared_lock owner_lock{m_image_ctx->owner_lock};
auto ctx = create_context_callback<
PreRemoveRequest<I>, &PreRemoveRequest<I>::handle_remove_snapshot>(this);
auto req = librbd::operation::SnapshotRemoveRequest<I>::create(
bool RefreshParentRequest<I>::is_refresh_required(
I &child_image_ctx, const ParentImageInfo &parent_md,
const MigrationInfo &migration_info) {
- ceph_assert(child_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(child_image_ctx.image_lock));
return (is_open_required(child_image_ctx, parent_md, migration_info) ||
is_close_required(child_image_ctx, parent_md, migration_info));
}
template <typename I>
void RefreshParentRequest<I>::apply() {
- ceph_assert(m_child_image_ctx.image_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(m_child_image_ctx.image_lock));
std::swap(m_child_image_ctx.parent, m_parent_image_ctx);
}
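
Lock-held assertions switch from member predicates (`is_locked()`, `is_wlocked()`) to the `ceph_mutex_is_locked()`/`ceph_mutex_is_wlocked()` macros. The point of the macro layer is that release builds, where `ceph::mutex` collapses to `std::mutex`, have nothing to query; a sketch of the shape this takes (an assumption about the header, not copied from it):

    // Release build: std::mutex cannot report ownership, so the
    // predicates degrade to constant truth and the asserts vanish.
    #define ceph_mutex_is_locked(m)  true
    #define ceph_mutex_is_wlocked(m) true

    // Debug build: ceph::mutex is a tracking wrapper, and the macros
    // forward to its real ownership queries instead.
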
uint64_t snap_id;
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
snap_id = m_image_ctx.snap_id;
}
template <typename I>
void RefreshRequest<I>::send_v2_refresh_parent() {
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
ParentImageInfo parent_md;
MigrationInfo migration_info;
Context *ctx = create_context_callback<
klass, &klass::handle_v2_init_exclusive_lock>(this);
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
m_exclusive_lock->init(m_features, ctx);
}
!m_image_ctx.exclusive_lock->is_lock_owner());
bool journal_disabled_by_policy;
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
journal_disabled_by_policy = (
!journal_disabled &&
m_image_ctx.get_journal_policy()->journal_disabled());
void RefreshRequest<I>::send_v2_block_writes() {
bool disabled_journaling = false;
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
disabled_journaling = ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0 &&
(m_features & RBD_FEATURE_JOURNALING) == 0 &&
m_image_ctx.journal != nullptr);
Context *ctx = create_context_callback<
RefreshRequest<I>, &RefreshRequest<I>::handle_v2_block_writes>(this);
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
m_image_ctx.io_work_queue->block_writes(ctx);
}
}
{
- RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
+ std::unique_lock owner_locker{m_image_ctx.owner_lock};
ceph_assert(m_image_ctx.exclusive_lock == nullptr);
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
auto ctx = create_context_callback<
RefreshRequest<I>, &RefreshRequest<I>::handle_flush_aio>(this);
auto aio_comp = io::AioCompletion::create_and_start(
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::scoped_lock locker{m_image_ctx.owner_lock, m_image_ctx.image_lock};
m_image_ctx.size = m_size;
m_image_ctx.lockers = m_lockers;
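
Where a function previously stacked two `WLocker` guards, the conversion collapses them into one `std::scoped_lock`, which locks all of its arguments with the standard deadlock-avoidance algorithm rather than relying on every call site acquiring them in the same order. Minimal illustration:

    #include <mutex>
    #include <shared_mutex>

    std::shared_mutex owner_lock;
    std::shared_mutex image_lock;

    void apply() {
      // Exclusive on both; safe even if another thread names them
      // in the opposite order in its own scoped_lock.
      std::scoped_lock locker{owner_lock, image_lock};
      // ... mutate state guarded by both locks ...
    }
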
*m_image_ctx, create_context_callback<
klass, &klass::handle_trim_image>(this));
- RWLock::RLocker owner_lock(m_image_ctx->owner_lock);
+ std::shared_lock owner_lock{m_image_ctx->owner_lock};
auto req = librbd::operation::TrimRequest<I>::create(
*m_image_ctx, ctx, m_image_ctx->size, 0, m_prog_ctx);
req->send();
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
- RWLock::WLocker image_locker(m_image_ctx->image_lock);
+ std::unique_lock image_locker{m_image_ctx->image_lock};
std::vector<uint64_t> snap_ids;
snap_ids.push_back(CEPH_NOSNAP);
for (auto it : m_image_ctx->snap_info) {
#define CEPH_LIBRBD_IMAGE_SET_FLAGS_REQUEST_H
#include "include/buffer.h"
-#include "common/Mutex.h"
#include <map>
#include <string>
template <typename I>
void SetSnapRequest<I>::send_init_exclusive_lock() {
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
ceph_assert(m_image_ctx.snap_id == CEPH_NOSNAP);
send_complete();
Context *ctx = create_context_callback<
klass, &klass::handle_init_exclusive_lock>(this);
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
m_exclusive_lock->init(m_image_ctx.features, ctx);
}
Context *ctx = create_context_callback<
klass, &klass::handle_block_writes>(this);
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
m_image_ctx.io_work_queue->block_writes(ctx);
}
}
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
auto it = m_image_ctx.snap_info.find(m_snap_id);
if (it == m_image_ctx.snap_info.end()) {
ldout(cct, 5) << "failed to locate snapshot '" << m_snap_id << "'"
template <typename I>
Context *SetSnapRequest<I>::send_shut_down_exclusive_lock(int *result) {
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
m_exclusive_lock = m_image_ctx.exclusive_lock;
}
ParentImageInfo parent_md;
bool refresh_parent;
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
const auto parent_info = m_image_ctx.get_parent_info(m_snap_id);
if (parent_info == nullptr) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << __func__ << dendl;
- RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::scoped_lock locker{m_image_ctx.owner_lock, m_image_ctx.image_lock};
if (m_snap_id != CEPH_NOSNAP) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr);
int r = m_image_ctx.snap_set(m_snap_id);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
m_notifier.notify(m_bl, &m_notify_response, create_context_callback<
NotifyLockOwner, &NotifyLockOwner::handle_notify>(this));
}
void image_info(ImageCtx *ictx, image_info_t& info, size_t infosize)
{
int obj_order = ictx->order;
- ictx->image_lock.get_read();
- info.size = ictx->get_image_size(ictx->snap_id);
- ictx->image_lock.put_read();
+ {
+ std::shared_lock locker{ictx->image_lock};
+ info.size = ictx->get_image_size(ictx->snap_id);
+ }
info.obj_size = 1ULL << obj_order;
info.num_objs = Striper::get_num_objects(ictx->layout, info.size);
info.order = obj_order;
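
This hunk does more than rename the calls: it introduces a nested scope so an RAII guard can bound the critical section exactly, dropping the lock at the closing brace before the unlocked arithmetic that follows. The idiom in isolation:

    #include <cstdint>
    #include <shared_mutex>

    std::shared_mutex image_lock;
    uint64_t current_size = 0;  // protected by image_lock

    uint64_t sample_size() {
      uint64_t size;
      {
        std::shared_lock locker{image_lock};  // held only inside the braces
        size = current_size;
      }                                       // released here
      return size;  // lock-free use of the sampled value
    }
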
void trim_image(ImageCtx *ictx, uint64_t newsize, ProgressContext& prog_ctx)
{
- ceph_assert(ictx->owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(ictx->owner_lock));
ceph_assert(ictx->exclusive_lock == nullptr ||
ictx->exclusive_lock->is_lock_owner());
C_SaferCond ctx;
- ictx->image_lock.get_read();
+ ictx->image_lock.lock_shared();
operation::TrimRequest<> *req = operation::TrimRequest<>::create(
*ictx, &ctx, ictx->size, newsize, prog_ctx);
- ictx->image_lock.put_read();
+ ictx->image_lock.unlock_shared();
req->send();
int r = ctx.wait();
return r;
}
- RWLock::RLocker l(ictx->image_lock);
+ std::shared_lock l{ictx->image_lock};
snap_t snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace(),
snap_name);
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
- RWLock::RLocker l(ictx->image_lock);
+ std::shared_lock l{ictx->image_lock};
snap_t snap_id = ictx->get_snap_id(*snap_namespace, snap_name);
if (snap_id == CEPH_NOSNAP)
return -ENOENT;
if (r < 0)
return r;
- RWLock::RLocker l(ictx->image_lock);
+ std::shared_lock l{ictx->image_lock};
snap_t snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace(), snap_name);
if (snap_id == CEPH_NOSNAP)
return -ENOENT;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
- RWLock::RLocker l2(ictx->image_lock);
+ std::shared_lock l2{ictx->image_lock};
*size = ictx->get_image_size(ictx->snap_id);
return 0;
}
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
- RWLock::RLocker l(ictx->image_lock);
+ std::shared_lock l{ictx->image_lock};
*features = ictx->features;
return 0;
}
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
- RWLock::RLocker image_locker(ictx->image_lock);
+ std::shared_lock image_locker{ictx->image_lock};
return ictx->get_parent_overlap(ictx->snap_id, overlap);
}
return r;
}
- RWLock::RLocker l2(ictx->image_lock);
+ std::shared_lock l2{ictx->image_lock};
return ictx->get_flags(ictx->snap_id, flags);
}
ldout(cct, 20) << __func__ << ": ictx=" << ictx << dendl;
*is_owner = false;
- RWLock::RLocker owner_locker(ictx->owner_lock);
+ std::shared_lock owner_locker{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
return 0;
}
C_SaferCond lock_ctx;
{
- RWLock::WLocker l(ictx->owner_lock);
+ std::unique_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
return r;
}
- RWLock::RLocker l(ictx->owner_lock);
+ std::shared_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
return -EINVAL;
} else if (!ictx->exclusive_lock->is_lock_owner()) {
C_SaferCond lock_ctx;
{
- RWLock::WLocker l(ictx->owner_lock);
+ std::unique_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr ||
!ictx->exclusive_lock->is_lock_owner()) {
managed_lock::Locker locker;
C_SaferCond get_owner_ctx;
{
- RWLock::RLocker l(ictx->owner_lock);
+ std::shared_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
C_SaferCond break_ctx;
{
- RWLock::RLocker l(ictx->owner_lock);
+ std::shared_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
if (r < 0)
return r;
- RWLock::RLocker l(ictx->image_lock);
+ std::shared_lock l{ictx->image_lock};
for (map<snap_t, SnapInfo>::iterator it = ictx->snap_info.begin();
it != ictx->snap_info.end(); ++it) {
snap_info_t info;
if (r < 0)
return r;
- RWLock::RLocker l(ictx->image_lock);
+ std::shared_lock l{ictx->image_lock};
*exists = ictx->get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP;
return 0;
}
<< (src->snap_name.length() ? "@" + src->snap_name : "")
<< " -> " << destname << " opts = " << opts << dendl;
- src->image_lock.get_read();
+ src->image_lock.lock_shared();
uint64_t features = src->features;
uint64_t src_size = src->get_image_size(src->snap_id);
- src->image_lock.put_read();
+ src->image_lock.unlock_shared();
uint64_t format = src->old_format ? 1 : 2;
if (opts.get(RBD_IMAGE_OPTION_FORMAT, &format) != 0) {
opts.set(RBD_IMAGE_OPTION_FORMAT, format);
int copy(ImageCtx *src, ImageCtx *dest, ProgressContext &prog_ctx, size_t sparse_size)
{
- src->image_lock.get_read();
+ src->image_lock.lock_shared();
uint64_t src_size = src->get_image_size(src->snap_id);
- src->image_lock.put_read();
+ src->image_lock.unlock_shared();
- dest->image_lock.get_read();
+ dest->image_lock.lock_shared();
uint64_t dest_size = dest->get_image_size(dest->snap_id);
- dest->image_lock.put_read();
+ dest->image_lock.unlock_shared();
CephContext *cct = src->cct;
if (dest_size < src_size) {
trace.init("copy", &src->trace_endpoint);
}
- RWLock::RLocker owner_lock(src->owner_lock);
+ std::shared_lock owner_lock{src->owner_lock};
SimpleThrottle throttle(src->config.get_val<uint64_t>("rbd_concurrent_management_ops"), false);
uint64_t period = src->get_stripe_period();
unsigned fadvise_flags = LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL |
}
{
- RWLock::RLocker image_locker(src->image_lock);
+ std::shared_lock image_locker{src->image_lock};
if (src->object_map != nullptr) {
bool skip = true;
// each period spans src->stripe_count objects; check them all
if (r < 0)
return r;
- RWLock::RLocker locker(ictx->image_lock);
+ std::shared_lock locker{ictx->image_lock};
if (exclusive)
*exclusive = ictx->exclusive_locked;
if (tag)
* duplicate that code.
*/
{
- RWLock::RLocker locker(ictx->image_lock);
+ std::shared_lock locker{ictx->image_lock};
r = rados::cls::lock::lock(&ictx->md_ctx, ictx->header_oid, RBD_LOCK_NAME,
exclusive ? LOCK_EXCLUSIVE : LOCK_SHARED,
cookie, tag, "", utime_t(), 0);
return r;
{
- RWLock::RLocker locker(ictx->image_lock);
+ std::shared_lock locker{ictx->image_lock};
r = rados::cls::lock::unlock(&ictx->md_ctx, ictx->header_oid,
RBD_LOCK_NAME, cookie);
if (r < 0) {
return r;
uint64_t mylen = len;
- ictx->image_lock.get_read();
+ ictx->image_lock.lock_shared();
r = clip_io(ictx, off, &mylen);
- ictx->image_lock.put_read();
+ ictx->image_lock.unlock_shared();
if (r < 0)
return r;
trace.init("read_iterate", &ictx->trace_endpoint);
}
- RWLock::RLocker owner_locker(ictx->owner_lock);
+ std::shared_lock owner_locker{ictx->owner_lock};
start_time = coarse_mono_clock::now();
while (left > 0) {
uint64_t period_off = off - (off % period);
// validate extent against image size; clip to image size if necessary
int clip_io(ImageCtx *ictx, uint64_t off, uint64_t *len)
{
- ceph_assert(ictx->image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(ictx->image_lock));
uint64_t image_size = ictx->get_image_size(ictx->snap_id);
bool snap_exists = ictx->snap_exists;
C_SaferCond ctx;
{
- RWLock::RLocker owner_locker(ictx->owner_lock);
+ std::shared_lock owner_locker{ictx->owner_lock};
ictx->io_object_dispatcher->invalidate_cache(&ctx);
}
r = ctx.wait();
: image_ctx(image_ctx), flush_contexts(std::move(flush_contexts)) {
}
void finish(int r) override {
- RWLock::RLocker owner_locker(image_ctx->owner_lock);
+ std::shared_lock owner_locker{image_ctx->owner_lock};
while (!flush_contexts.empty()) {
Context *flush_ctx = flush_contexts.front();
flush_contexts.pop_front();
m_image_ctx = &image_ctx;
ldout(m_image_ctx->cct, 20) << this << " " << __func__ << dendl;
- Mutex::Locker l(m_image_ctx->async_ops_lock);
+ std::lock_guard l{m_image_ctx->async_ops_lock};
m_image_ctx->async_ops.push_front(&m_xlist_item);
}
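
For plain mutexes the same pattern holds: `Mutex::Locker` becomes `std::lock_guard`, written with braces so class template argument deduction picks the mutex type and the declaration can never be parsed as anything but a variable. In isolation:

    #include <mutex>

    std::mutex m;

    void f() {
      std::lock_guard l{m};  // deduces std::lock_guard<std::mutex>
      // ... critical section ...
    }                        // unlocked at scope exit
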
ldout(m_image_ctx->cct, 20) << this << " " << __func__ << dendl;
{
- Mutex::Locker l(m_image_ctx->async_ops_lock);
+ std::lock_guard l{m_image_ctx->async_ops_lock};
xlist<AsyncOperation *>::iterator iter(&m_xlist_item);
++iter;
ceph_assert(m_xlist_item.remove_myself());
void AsyncOperation::flush(Context* on_finish) {
{
- Mutex::Locker locker(m_image_ctx->async_ops_lock);
+ std::lock_guard locker{m_image_ctx->async_ops_lock};
xlist<AsyncOperation *>::iterator iter(&m_xlist_item);
++iter;
#include "librbd/io/CopyupRequest.h"
#include "common/ceph_context.h"
+#include "common/ceph_mutex.h"
#include "common/dout.h"
#include "common/errno.h"
-#include "common/Mutex.h"
#include "common/WorkQueue.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/ExclusiveLock.h"
int send() override {
auto& image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
if (image_ctx.exclusive_lock == nullptr) {
return 1;
}
ceph_assert(image_ctx.exclusive_lock->is_lock_owner());
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
if (image_ctx.object_map == nullptr) {
return 1;
}
int update_head() {
auto& image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
bool sent = image_ctx.object_map->template aio_update<Context>(
CEPH_NOSNAP, m_object_no, m_head_object_map_state, {}, m_trace, false,
int update_snapshot(uint64_t snap_id) {
auto& image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
uint8_t state = OBJECT_EXISTS;
if (image_ctx.test_features(RBD_FEATURE_FAST_DIFF, image_ctx.image_lock) &&
Extents &&image_extents,
const ZTracer::Trace &parent_trace)
: m_image_ctx(ictx), m_object_no(objectno), m_image_extents(image_extents),
- m_trace(util::create_trace(*m_image_ctx, "copy-up", parent_trace)),
- m_lock("CopyupRequest", false, false)
+ m_trace(util::create_trace(*m_image_ctx, "copy-up", parent_trace))
{
m_async_op.start_op(*util::get_image_ctx(m_image_ctx));
}
template <typename I>
void CopyupRequest<I>::append_request(AbstractObjectWriteRequest<I> *req) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_request=" << req << ", "
template <typename I>
void CopyupRequest<I>::read_from_parent() {
auto cct = m_image_ctx->cct;
- RWLock::RLocker image_locker(m_image_ctx->image_lock);
+ std::shared_lock image_locker{m_image_ctx->image_lock};
if (m_image_ctx->parent == nullptr) {
ldout(cct, 5) << "parent detached" << dendl;
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << dendl;
- m_image_ctx->image_lock.get_read();
- m_lock.Lock();
+ m_image_ctx->image_lock.lock_shared();
+ m_lock.lock();
m_copyup_is_zero = m_copyup_data.is_zero();
m_copyup_required = is_copyup_required();
disable_append_requests();
if (r < 0 && r != -ENOENT) {
- m_lock.Unlock();
- m_image_ctx->image_lock.put_read();
+ m_lock.unlock();
+ m_image_ctx->image_lock.unlock_shared();
lderr(cct) << "error reading from parent: " << cpp_strerror(r) << dendl;
finish(r);
}
if (!m_copyup_required) {
- m_lock.Unlock();
- m_image_ctx->image_lock.put_read();
+ m_lock.unlock();
+ m_image_ctx->image_lock.unlock_shared();
ldout(cct, 20) << "no-op, skipping" << dendl;
finish(0);
m_image_ctx->snaps.rend());
}
- m_lock.Unlock();
- m_image_ctx->image_lock.put_read();
+ m_lock.unlock();
+ m_image_ctx->image_lock.unlock_shared();
update_object_maps();
}
template <typename I>
void CopyupRequest<I>::deep_copy() {
auto cct = m_image_ctx->cct;
- ceph_assert(m_image_ctx->image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
ceph_assert(m_image_ctx->parent != nullptr);
- m_lock.Lock();
+ m_lock.lock();
m_flatten = is_copyup_required() ? true : m_image_ctx->migration_info.flatten;
- m_lock.Unlock();
+ m_lock.unlock();
ldout(cct, 20) << "flatten=" << m_flatten << dendl;
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "r=" << r << dendl;
- m_image_ctx->image_lock.get_read();
- m_lock.Lock();
+ m_image_ctx->image_lock.lock_shared();
+ m_lock.lock();
m_copyup_required = is_copyup_required();
if (r == -ENOENT && !m_flatten && m_copyup_required) {
- m_lock.Unlock();
- m_image_ctx->image_lock.put_read();
+ m_lock.unlock();
+ m_image_ctx->image_lock.unlock_shared();
ldout(cct, 10) << "restart deep-copy with flatten" << dendl;
send();
disable_append_requests();
if (r < 0 && r != -ENOENT) {
- m_lock.Unlock();
- m_image_ctx->image_lock.put_read();
+ m_lock.unlock();
+ m_image_ctx->image_lock.unlock_shared();
lderr(cct) << "error encountered during deep-copy: " << cpp_strerror(r)
<< dendl;
}
if (!m_copyup_required && !is_update_object_map_required(r)) {
- m_lock.Unlock();
- m_image_ctx->image_lock.put_read();
+ m_lock.unlock();
+ m_image_ctx->image_lock.unlock_shared();
if (r == -ENOENT) {
r = 0;
compute_deep_copy_snap_ids();
}
- m_lock.Unlock();
- m_image_ctx->image_lock.put_read();
+ m_lock.unlock();
+ m_image_ctx->image_lock.unlock_shared();
update_object_maps();
}
template <typename I>
void CopyupRequest<I>::update_object_maps() {
- RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
- RWLock::RLocker image_locker(m_image_ctx->image_lock);
+ std::shared_lock owner_locker{m_image_ctx->owner_lock};
+ std::shared_lock image_locker{m_image_ctx->image_lock};
if (m_image_ctx->object_map == nullptr) {
image_locker.unlock();
owner_locker.unlock();
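
Because `std::shared_lock` exposes `unlock()`, a bail-out path can release its guards early while the destructors still cover every other exit (destroying an already-released guard is a no-op). Sketch:

    #include <shared_mutex>

    std::shared_mutex owner_lock;
    std::shared_mutex image_lock;
    bool object_map_enabled = false;  // guarded by the locks above

    void update_maps() {
      std::shared_lock owner_locker{owner_lock};
      std::shared_lock image_locker{image_lock};
      if (!object_map_enabled) {
        image_locker.unlock();   // drop in reverse acquisition order
        owner_locker.unlock();
        return;                  // destructors see released guards: no-op
      }
      // ... update object maps under both locks ...
    }
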
template <typename I>
void CopyupRequest<I>::copyup() {
auto cct = m_image_ctx->cct;
- m_image_ctx->image_lock.get_read();
+ m_image_ctx->image_lock.lock_shared();
auto snapc = m_image_ctx->snapc;
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
- m_lock.Lock();
+ m_lock.lock();
if (!m_copyup_required) {
- m_lock.Unlock();
+ m_lock.unlock();
ldout(cct, 20) << "skipping copyup" << dendl;
finish(0);
++m_pending_copyups;
}
}
- m_lock.Unlock();
+ m_lock.unlock();
// issue librados ops at the end to simplify test cases
std::string oid(data_object_name(m_image_ctx, m_object_no));
auto cct = m_image_ctx->cct;
unsigned pending_copyups;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_pending_copyups > 0);
pending_copyups = --m_pending_copyups;
}
template <typename I>
void CopyupRequest<I>::disable_append_requests() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
m_append_request_permitted = false;
}
template <typename I>
void CopyupRequest<I>::remove_from_list() {
- Mutex::Locker copyup_list_locker(m_image_ctx->copyup_list_lock);
+ std::lock_guard copyup_list_locker{m_image_ctx->copyup_list_lock};
auto it = m_image_ctx->copyup_list.find(m_object_no);
if (it != m_image_ctx->copyup_list.end()) {
template <typename I>
bool CopyupRequest<I>::is_copyup_required() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
bool copy_on_read = m_pending_requests.empty();
if (copy_on_read) {
template <typename I>
bool CopyupRequest<I>::is_deep_copy() const {
- ceph_assert(m_image_ctx->image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
return !m_image_ctx->migration_info.empty();
}
template <typename I>
bool CopyupRequest<I>::is_update_object_map_required(int r) {
- ceph_assert(m_image_ctx->image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
if (r < 0) {
return false;
template <typename I>
void CopyupRequest<I>::compute_deep_copy_snap_ids() {
- ceph_assert(m_image_ctx->image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
// don't copy ids for the snaps updated by object deep copy or
// that don't overlap
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "include/buffer.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "common/zipkin_trace.h"
#include "librbd/io/AsyncOperation.h"
#include "librbd/io/Types.h"
std::vector<uint64_t> m_snap_ids;
bool m_first_snap_is_clean = false;
- Mutex m_lock;
+ ceph::mutex m_lock = ceph::make_mutex("CopyupRequest", false);
WriteRequests m_pending_requests;
unsigned m_pending_copyups = 0;
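
`ceph::make_mutex` lets the lock be initialized right at its declaration, which is why the `CopyupRequest` constructor hunk above simply drops the `m_lock(...)` member-initializer. The trailing `false` presumably maps onto the debug mutex's lockdep flag, mirroring the old `Mutex(name, recursive, lockdep)` signature; in release builds `make_mutex()` ignores its arguments entirely. Pattern sketch:

    #include "common/ceph_mutex.h"   // ceph::mutex, ceph::make_mutex

    struct CopyupState {
      // Named at the declaration; degrades to a plain std::mutex
      // (arguments discarded) in release builds.
      ceph::mutex lock = ceph::make_mutex("CopyupState::lock");
    };
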
total_bytes += image_extent.second;
}
- ictx->image_lock.get_read();
+ ictx->image_lock.lock_shared();
auto total_bytes_read = ictx->total_bytes_read.fetch_add(total_bytes);
bool abort = (
ictx->readahead_disable_after_bytes != 0 &&
total_bytes_read > ictx->readahead_disable_after_bytes);
if (abort) {
- ictx->image_lock.put_read();
+ ictx->image_lock.unlock_shared();
return;
}
uint64_t image_size = ictx->get_image_size(ictx->snap_id);
auto snap_id = ictx->snap_id;
- ictx->image_lock.put_read();
+ ictx->image_lock.unlock_shared();
auto readahead_extent = ictx->readahead.update(image_extents, image_size);
uint64_t readahead_offset = readahead_extent.first;
template <typename I>
int ImageRequest<I>::clip_request() {
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
for (auto &image_extent : m_image_extents) {
auto clip_len = image_extent.second;
int r = clip_io(get_image_ctx(&m_image_ctx), image_extent.first, &clip_len);
utime_t ts = ceph_clock_now();
{
- RWLock::RLocker timestamp_locker(m_image_ctx.timestamp_lock);
+ std::shared_lock timestamp_locker{m_image_ctx.timestamp_lock};
if (!should_update_timestamp(ts, std::invoke(get_timestamp_fn, m_image_ctx),
update_interval)) {
return;
}
{
- RWLock::WLocker timestamp_locker(m_image_ctx.timestamp_lock);
+ std::unique_lock timestamp_locker{m_image_ctx.timestamp_lock};
bool update = should_update_timestamp(
ts, std::invoke(get_timestamp_fn, m_image_ctx), update_interval);
if (!update) {
{
// prevent image size from changing between computing clip and recording
// pending async operation
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
snap_id = image_ctx.snap_id;
}
{
// prevent image size from changing between computing clip and recording
// pending async operation
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
if (image_ctx.snap_id != CEPH_NOSNAP || image_ctx.read_only) {
aio_comp->fail(-EROFS);
return;
bool journaling = false;
{
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
journaling = (m_flush_source == FLUSH_SOURCE_USER &&
image_ctx.journal != nullptr &&
image_ctx.journal->is_journal_appending());
time_t ti, ThreadPool *tp)
: ThreadPool::PointerWQ<ImageDispatchSpec<I> >(name, ti, 0, tp),
m_image_ctx(*image_ctx),
- m_lock(util::unique_lock_name("ImageRequestWQ<I>::m_lock", this)) {
+ m_lock(ceph::make_shared_mutex(
+ util::unique_lock_name("ImageRequestWQ<I>::m_lock", this))) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << "ictx=" << image_ctx << dendl;
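
`ceph::make_shared_mutex` is the reader/writer counterpart, and `util::unique_lock_name()` still suffixes the instance address so lockdep can tell per-object locks apart. A hypothetical constructor following the same shape:

    #include "common/ceph_mutex.h"
    #include "librbd/Utils.h"   // librbd::util::unique_lock_name

    struct WorkQueueState {
      ceph::shared_mutex lock;
      WorkQueueState()
        : lock(ceph::make_shared_mutex(
            librbd::util::unique_lock_name("WorkQueueState::lock", this))) {
      }
    };
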
SafeTimer *timer;
- Mutex *timer_lock;
+ ceph::mutex *timer_lock;
ImageCtx::get_timer_instance(cct, &timer, &timer_lock);
for (auto flag : throttle_flags) {
ldout(cct, 20) << "ictx=" << &m_image_ctx << ", off=" << off << ", "
<< "len = " << len << dendl;
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&m_image_ctx), off, &len);
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
ldout(cct, 20) << "ictx=" << &m_image_ctx << ", off=" << off << ", "
<< "len = " << len << dendl;
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&m_image_ctx), off, &len);
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
ldout(cct, 20) << "ictx=" << &m_image_ctx << ", off=" << off << ", "
<< "len = " << len << ", data_len " << bl.length() << dendl;
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&m_image_ctx), off, &len);
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
ldout(cct, 20) << "compare_and_write ictx=" << &m_image_ctx << ", off="
<< off << ", " << "len = " << len << dendl;
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&m_image_ctx), off, &len);
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
// if journaling is enabled -- we need to replay the journal because
// it might contain an uncommitted write
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.non_blocking_aio || writes_blocked() || !writes_empty() ||
require_lock_on_read()) {
queue(ImageDispatchSpec<I>::create_read_request(
return;
}
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.non_blocking_aio || writes_blocked()) {
queue(ImageDispatchSpec<I>::create_write_request(
m_image_ctx, c, {{off, len}}, std::move(bl), op_flags, trace));
return;
}
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.non_blocking_aio || writes_blocked()) {
queue(ImageDispatchSpec<I>::create_discard_request(
m_image_ctx, c, off, len, discard_granularity_bytes, trace));
return;
}
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.non_blocking_aio || writes_blocked() || !writes_empty()) {
queue(ImageDispatchSpec<I>::create_flush_request(
m_image_ctx, c, FLUSH_SOURCE_USER, trace));
return;
}
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.non_blocking_aio || writes_blocked()) {
queue(ImageDispatchSpec<I>::create_write_same_request(
m_image_ctx, c, off, len, std::move(bl), op_flags, trace));
return;
}
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.non_blocking_aio || writes_blocked()) {
queue(ImageDispatchSpec<I>::create_compare_and_write_request(
m_image_ctx, c, {{off, len}}, std::move(cmp_bl), std::move(bl),
template <typename I>
void ImageRequestWQ<I>::shut_down(Context *on_shutdown) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
{
- RWLock::WLocker locker(m_lock);
+ std::unique_lock locker{m_lock};
ceph_assert(!m_shutdown);
m_shutdown = true;
template <typename I>
void ImageRequestWQ<I>::block_writes(Context *on_blocked) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
{
- RWLock::WLocker locker(m_lock);
+ std::unique_lock locker{m_lock};
++m_write_blockers;
ldout(cct, 5) << &m_image_ctx << ", " << "num="
<< m_write_blockers << dendl;
bool wake_up = false;
Contexts waiter_contexts;
{
- RWLock::WLocker locker(m_lock);
+ std::unique_lock locker{m_lock};
ceph_assert(m_write_blockers > 0);
--m_write_blockers;
template <typename I>
void ImageRequestWQ<I>::wait_on_writes_unblocked(Context *on_unblocked) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
{
- RWLock::WLocker locker(m_lock);
+ std::unique_lock locker{m_lock};
ldout(cct, 20) << &m_image_ctx << ", " << "write_blockers="
<< m_write_blockers << dendl;
if (!m_unblocked_write_waiter_contexts.empty() || m_write_blockers > 0) {
bool wake_up = false;
{
- RWLock::WLocker locker(m_lock);
+ std::unique_lock locker{m_lock};
switch (direction) {
case DIRECTION_READ:
wake_up = (enabled != m_require_lock_on_read);
bool lock_required;
bool refresh_required = m_image_ctx.state->is_refresh_required();
{
- RWLock::RLocker locker(m_lock);
+ std::shared_lock locker{m_lock};
bool write_op = peek_item->is_write_op();
lock_required = is_lock_required(write_op);
if (write_op) {
if (lock_required) {
this->get_pool_lock().unlock();
- m_image_ctx.owner_lock.get_read();
+ m_image_ctx.owner_lock.lock_shared();
if (m_image_ctx.exclusive_lock != nullptr) {
ldout(cct, 5) << "exclusive lock required: delaying IO " << item << dendl;
if (!m_image_ctx.get_exclusive_lock_policy()->may_auto_request_lock()) {
// raced with the exclusive lock being disabled
lock_required = false;
}
- m_image_ctx.owner_lock.put_read();
+ m_image_ctx.owner_lock.unlock_shared();
this->get_pool_lock().lock();
if (lock_required) {
template <typename I>
void ImageRequestWQ<I>::finish_queued_io(ImageDispatchSpec<I> *req) {
- RWLock::RLocker locker(m_lock);
+ std::shared_lock locker{m_lock};
if (req->is_write_op()) {
ceph_assert(m_queued_writes > 0);
m_queued_writes--;
void ImageRequestWQ<I>::finish_in_flight_write() {
bool writes_blocked = false;
{
- RWLock::RLocker locker(m_lock);
+ std::shared_lock locker{m_lock};
ceph_assert(m_in_flight_writes > 0);
if (--m_in_flight_writes == 0 &&
!m_write_blocker_contexts.empty()) {
template <typename I>
int ImageRequestWQ<I>::start_in_flight_io(AioCompletion *c) {
- RWLock::RLocker locker(m_lock);
+ std::shared_lock locker{m_lock};
if (m_shutdown) {
CephContext *cct = m_image_ctx.cct;
void ImageRequestWQ<I>::finish_in_flight_io() {
Context *on_shutdown;
{
- RWLock::RLocker locker(m_lock);
+ std::shared_lock locker{m_lock};
if (--m_in_flight_ios > 0 || !m_shutdown) {
return;
}
template <typename I>
bool ImageRequestWQ<I>::is_lock_required(bool write_op) const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
return ((write_op && m_require_lock_on_write) ||
(!write_op && m_require_lock_on_read));
}
template <typename I>
void ImageRequestWQ<I>::queue(ImageDispatchSpec<I> *req) {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "ictx=" << &m_image_ctx << ", "
void ImageRequestWQ<I>::handle_blocked_writes(int r) {
Contexts contexts;
{
- RWLock::WLocker locker(m_lock);
+ std::unique_lock locker{m_lock};
contexts.swap(m_write_blocker_contexts);
}
#define CEPH_LIBRBD_IO_IMAGE_REQUEST_WQ_H
#include "include/Context.h"
-#include "common/RWLock.h"
+#include "common/ceph_mutex.h"
#include "common/Throttle.h"
#include "common/WorkQueue.h"
#include "librbd/io/Types.h"
void shut_down(Context *on_shutdown);
inline bool writes_blocked() const {
- RWLock::RLocker locker(m_lock);
+ std::shared_lock locker{m_lock};
return (m_write_blockers > 0);
}
struct C_RefreshFinish;
ImageCtxT &m_image_ctx;
- mutable RWLock m_lock;
+ mutable ceph::shared_mutex m_lock;
Contexts m_write_blocker_contexts;
uint32_t m_write_blockers = 0;
Contexts m_unblocked_write_waiter_contexts;
bool is_lock_required(bool write_op) const;
inline bool require_lock_on_read() const {
- RWLock::RLocker locker(m_lock);
+ std::shared_lock locker{m_lock};
return m_require_lock_on_read;
}
inline bool writes_empty() const {
- RWLock::RLocker locker(m_lock);
+ std::shared_lock locker{m_lock};
return (m_queued_writes == 0);
}
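
These accessors are `const`, which is exactly why the member declaration above gains `mutable`: even taking a shared lock mutates the mutex object itself. The pattern in isolation:

    #include <cstdint>
    #include <shared_mutex>

    class Counter {
     public:
      uint64_t value() const {
        std::shared_lock locker{m_lock};  // legal because m_lock is mutable
        return m_value;
      }
     private:
      mutable std::shared_mutex m_lock;
      uint64_t m_value = 0;
    };
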
void complete(int r) override {
while (true) {
- object_dispatcher->m_lock.get_read();
+ object_dispatcher->m_lock.lock_shared();
auto it = object_dispatcher->m_object_dispatches.upper_bound(
object_dispatch_layer);
if (it == object_dispatcher->m_object_dispatches.end()) {
- object_dispatcher->m_lock.put_read();
+ object_dispatcher->m_lock.unlock_shared();
Context::complete(r);
return;
}
// prevent recursive locking back into the dispatcher while handling IO
object_dispatch_meta.async_op_tracker->start_op();
- object_dispatcher->m_lock.put_read();
+ object_dispatcher->m_lock.unlock_shared();
// next loop should start after current layer
object_dispatch_layer = object_dispatch->get_object_dispatch_layer();
template <typename I>
ObjectDispatcher<I>::ObjectDispatcher(I* image_ctx)
: m_image_ctx(image_ctx),
- m_lock(librbd::util::unique_lock_name("librbd::io::ObjectDispatcher::lock",
- this)) {
+ m_lock(ceph::make_shared_mutex(
+ librbd::util::unique_lock_name("librbd::io::ObjectDispatcher::lock",
+ this))) {
// configure the core object dispatch handler on startup
auto object_dispatch = new ObjectDispatch(image_ctx);
m_object_dispatches[object_dispatch->get_object_dispatch_layer()] =
std::map<ObjectDispatchLayer, ObjectDispatchMeta> object_dispatches;
{
- RWLock::WLocker locker(m_lock);
+ std::unique_lock locker{m_lock};
std::swap(object_dispatches, m_object_dispatches);
}
auto type = object_dispatch->get_object_dispatch_layer();
ldout(cct, 5) << "object_dispatch_layer=" << type << dendl;
- RWLock::WLocker locker(m_lock);
+ std::unique_lock locker{m_lock};
ceph_assert(type < OBJECT_DISPATCH_LAYER_LAST);
auto result = m_object_dispatches.insert(
ObjectDispatchMeta object_dispatch_meta;
{
- RWLock::WLocker locker(m_lock);
+ std::unique_lock locker{m_lock};
auto it = m_object_dispatches.find(object_dispatch_layer);
ceph_assert(it != m_object_dispatches.end());
// apply the IO request to all layers -- this method will be re-invoked
// by the dispatch layer if continuing / restarting the IO
while (true) {
- m_lock.get_read();
+ m_lock.lock_shared();
object_dispatch_layer = object_dispatch_spec->object_dispatch_layer;
auto it = m_object_dispatches.upper_bound(object_dispatch_layer);
if (it == m_object_dispatches.end()) {
// the request is complete if handled by all layers
object_dispatch_spec->dispatch_result = DISPATCH_RESULT_COMPLETE;
- m_lock.put_read();
+ m_lock.unlock_shared();
break;
}
// prevent recursive locking back into the dispatcher while handling IO
object_dispatch_meta.async_op_tracker->start_op();
- m_lock.put_read();
+ m_lock.unlock_shared();
// advance to next layer in case we skip or continue
object_dispatch_spec->object_dispatch_layer =
#define CEPH_LIBRBD_IO_OBJECT_DISPATCHER_H
#include "include/int_types.h"
-#include "common/RWLock.h"
+#include "common/ceph_mutex.h"
#include "librbd/io/Types.h"
#include <map>
ImageCtxT* m_image_ctx;
- RWLock m_lock;
+ ceph::shared_mutex m_lock;
std::map<ObjectDispatchLayer, ObjectDispatchMeta> m_object_dispatches;
void send(ObjectDispatchSpec* object_dispatch_spec);
#include "common/ceph_context.h"
#include "common/dout.h"
#include "common/errno.h"
-#include "common/Mutex.h"
-#include "common/RWLock.h"
+#include "common/ceph_mutex.h"
#include "common/WorkQueue.h"
#include "include/Context.h"
#include "include/err.h"
template <typename I>
inline bool is_copy_on_read(I *ictx, librados::snap_t snap_id) {
- RWLock::RLocker image_locker(ictx->image_lock);
+ std::shared_lock image_locker{ictx->image_lock};
return (ictx->clone_copy_on_read &&
!ictx->read_only && snap_id == CEPH_NOSNAP &&
(ictx->exclusive_lock == nullptr ||
template <typename I>
bool ObjectRequest<I>::compute_parent_extents(Extents *parent_extents,
bool read_request) {
- ceph_assert(m_ictx->image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_ictx->image_lock));
m_has_parent = false;
parent_extents->clear();
void ObjectReadRequest<I>::read_object() {
I *image_ctx = this->m_ictx;
{
- RWLock::RLocker image_locker(image_ctx->image_lock);
+ std::shared_lock image_locker{image_ctx->image_lock};
if (image_ctx->object_map != nullptr &&
!image_ctx->object_map->object_may_exist(this->m_object_no)) {
image_ctx->op_work_queue->queue(new FunctionContext([this](int r) {
void ObjectReadRequest<I>::read_parent() {
I *image_ctx = this->m_ictx;
- RWLock::RLocker image_locker(image_ctx->image_lock);
+ std::shared_lock image_locker{image_ctx->image_lock};
// calculate reverse mapping onto the image
Extents parent_extents;
return;
}
- image_ctx->owner_lock.get_read();
- image_ctx->image_lock.get_read();
+ image_ctx->owner_lock.lock_shared();
+ image_ctx->image_lock.lock_shared();
Extents parent_extents;
if (!this->compute_parent_extents(&parent_extents, true) ||
(image_ctx->exclusive_lock != nullptr &&
!image_ctx->exclusive_lock->is_lock_owner())) {
- image_ctx->image_lock.put_read();
- image_ctx->owner_lock.put_read();
+ image_ctx->image_lock.unlock_shared();
+ image_ctx->owner_lock.unlock_shared();
this->finish(0);
return;
}
ldout(image_ctx->cct, 20) << dendl;
- image_ctx->copyup_list_lock.Lock();
+ image_ctx->copyup_list_lock.lock();
auto it = image_ctx->copyup_list.find(this->m_object_no);
if (it == image_ctx->copyup_list.end()) {
// create and kick off a CopyupRequest
image_ctx, this->m_object_no, std::move(parent_extents), this->m_trace);
image_ctx->copyup_list[this->m_object_no] = new_req;
- image_ctx->copyup_list_lock.Unlock();
- image_ctx->image_lock.put_read();
+ image_ctx->copyup_list_lock.unlock();
+ image_ctx->image_lock.unlock_shared();
new_req->send();
} else {
- image_ctx->copyup_list_lock.Unlock();
- image_ctx->image_lock.put_read();
+ image_ctx->copyup_list_lock.unlock();
+ image_ctx->image_lock.unlock_shared();
}
- image_ctx->owner_lock.put_read();
+ image_ctx->owner_lock.unlock_shared();
this->finish(0);
}
compute_parent_info();
- ictx->image_lock.get_read();
+ ictx->image_lock.lock_shared();
if (!ictx->migration_info.empty()) {
m_guarding_migration_write = true;
}
- ictx->image_lock.put_read();
+ ictx->image_lock.unlock_shared();
}
template <typename I>
void AbstractObjectWriteRequest<I>::compute_parent_info() {
I *image_ctx = this->m_ictx;
- RWLock::RLocker image_locker(image_ctx->image_lock);
+ std::shared_lock image_locker{image_ctx->image_lock};
this->compute_parent_extents(&m_parent_extents, false);
void AbstractObjectWriteRequest<I>::add_write_hint(
librados::ObjectWriteOperation *wr) {
I *image_ctx = this->m_ictx;
- RWLock::RLocker image_locker(image_ctx->image_lock);
+ std::shared_lock image_locker{image_ctx->image_lock};
if (image_ctx->object_map == nullptr || !this->m_object_may_exist) {
ObjectRequest<I>::add_write_hint(*image_ctx, wr);
}
<< this->m_object_off << "~" << this->m_object_len
<< dendl;
{
- RWLock::RLocker image_lock(image_ctx->image_lock);
+ std::shared_lock image_lock{image_ctx->image_lock};
if (image_ctx->object_map == nullptr) {
m_object_may_exist = true;
} else {
void AbstractObjectWriteRequest<I>::pre_write_object_map_update() {
I *image_ctx = this->m_ictx;
- image_ctx->image_lock.get_read();
+ image_ctx->image_lock.lock_shared();
if (image_ctx->object_map == nullptr || !is_object_map_update_enabled()) {
- image_ctx->image_lock.put_read();
+ image_ctx->image_lock.unlock_shared();
write_object();
return;
}
if (!m_object_may_exist && m_copyup_enabled) {
// optimization: copyup required
- image_ctx->image_lock.put_read();
+ image_ctx->image_lock.unlock_shared();
copyup();
return;
}
&AbstractObjectWriteRequest<I>::handle_pre_write_object_map_update>(
CEPH_NOSNAP, this->m_object_no, new_state, {}, this->m_trace, false,
this)) {
- image_ctx->image_lock.put_read();
+ image_ctx->image_lock.unlock_shared();
return;
}
- image_ctx->image_lock.put_read();
+ image_ctx->image_lock.unlock_shared();
write_object();
}
return;
}
} else if (r == -ERANGE && m_guarding_migration_write) {
- image_ctx->image_lock.get_read();
+ image_ctx->image_lock.lock_shared();
m_guarding_migration_write = !image_ctx->migration_info.empty();
- image_ctx->image_lock.put_read();
+ image_ctx->image_lock.unlock_shared();
if (m_guarding_migration_write) {
copyup();
ceph_assert(!m_copyup_in_progress);
m_copyup_in_progress = true;
- image_ctx->copyup_list_lock.Lock();
+ image_ctx->copyup_list_lock.lock();
auto it = image_ctx->copyup_list.find(this->m_object_no);
if (it == image_ctx->copyup_list.end()) {
auto new_req = CopyupRequest<I>::create(
new_req->append_request(this);
image_ctx->copyup_list[this->m_object_no] = new_req;
- image_ctx->copyup_list_lock.Unlock();
+ image_ctx->copyup_list_lock.unlock();
new_req->send();
} else {
it->second->append_request(this);
- image_ctx->copyup_list_lock.Unlock();
+ image_ctx->copyup_list_lock.unlock();
}
}
void AbstractObjectWriteRequest<I>::post_write_object_map_update() {
I *image_ctx = this->m_ictx;
- image_ctx->image_lock.get_read();
+ image_ctx->image_lock.lock_shared();
if (image_ctx->object_map == nullptr || !is_object_map_update_enabled() ||
!is_non_existent_post_write_object_map_state()) {
- image_ctx->image_lock.put_read();
+ image_ctx->image_lock.unlock_shared();
this->finish(0);
return;
}
&AbstractObjectWriteRequest<I>::handle_post_write_object_map_update>(
CEPH_NOSNAP, this->m_object_no, OBJECT_NONEXISTENT, OBJECT_PENDING,
this->m_trace, false, this)) {
- image_ctx->image_lock.put_read();
+ image_ctx->image_lock.unlock_shared();
return;
}
- image_ctx->image_lock.put_read();
+ image_ctx->image_lock.unlock_shared();
this->finish(0);
}
template <typename I>
void SimpleSchedulerObjectDispatch<I>::ObjectRequests::dispatch_delayed_requests(
- I *image_ctx, LatencyStats *latency_stats, Mutex *latency_stats_lock) {
+ I *image_ctx, LatencyStats *latency_stats, ceph::mutex *latency_stats_lock) {
for (auto &it : m_delayed_requests) {
auto offset = it.first;
auto &merged_requests = it.second;
[requests=std::move(merged_requests.requests), latency_stats,
latency_stats_lock, start_time=ceph_clock_now()](int r) {
if (latency_stats) {
- Mutex::Locker locker(*latency_stats_lock);
+ std::lock_guard locker{*latency_stats_lock};
auto latency = ceph_clock_now() - start_time;
latency_stats->add(latency.to_nsec());
}
req->send();
}
- m_dispatch_time = utime_t();
+ m_dispatch_time = {};
}
template <typename I>
SimpleSchedulerObjectDispatch<I>::SimpleSchedulerObjectDispatch(
I* image_ctx)
: m_image_ctx(image_ctx),
- m_lock(librbd::util::unique_lock_name(
- "librbd::io::SimpleSchedulerObjectDispatch::lock", this)),
+ m_lock(ceph::make_mutex(librbd::util::unique_lock_name(
+ "librbd::io::SimpleSchedulerObjectDispatch::lock", this))),
m_max_delay(image_ctx->config.template get_val<uint64_t>(
"rbd_io_scheduler_simple_max_delay")) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
<< object_off << "~" << object_len << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (intersects(object_no, object_off, object_len)) {
dispatch_delayed_requests(object_no);
}
ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
<< object_off << "~" << object_len << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
dispatch_delayed_requests(object_no);
register_in_flight_request(object_no, {}, on_finish);
ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
<< object_off << "~" << data.length() << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (try_delay_write(object_no, object_off, std::move(data), snapc, op_flags,
*object_dispatch_flags, on_dispatched)) {
*dispatch_result = DISPATCH_RESULT_COMPLETE;
ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
<< object_off << "~" << object_len << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
dispatch_delayed_requests(object_no);
register_in_flight_request(object_no, {}, on_finish);
ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
<< object_off << "~" << cmp_data.length() << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
dispatch_delayed_requests(object_no);
register_in_flight_request(object_no, {}, on_finish);
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
dispatch_all_delayed_requests();
return false;
template <typename I>
bool SimpleSchedulerObjectDispatch<I>::intersects(
uint64_t object_no, uint64_t object_off, uint64_t len) const {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
auto cct = m_image_ctx->cct;
auto it = m_requests.find(object_no);
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
const ::SnapContext &snapc, int op_flags, int object_dispatch_flags,
Context* on_dispatched) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
auto cct = m_image_ctx->cct;
if (m_latency_stats && !m_latency_stats->is_ready()) {
// schedule dispatch on the first request added
if (delayed && !object_requests->is_scheduled_dispatch()) {
- auto dispatch_time = ceph_clock_now();
+ auto dispatch_time = ceph::real_clock::now();
if (m_latency_stats) {
- dispatch_time += utime_t(0, m_latency_stats->avg() / 2);
+ dispatch_time += std::chrono::nanoseconds(m_latency_stats->avg() / 2);
} else {
- dispatch_time += utime_t(0, m_max_delay * 1000000);
+ dispatch_time += std::chrono::milliseconds(m_max_delay);
}
object_requests->set_scheduled_dispatch(dispatch_time);
m_dispatch_queue.push_back(object_requests);
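
Scheduling switches from `utime_t` to `ceph::real_clock::time_point`, so the offsets become typed `std::chrono` durations: `utime_t(0, ns)` turns into `std::chrono::nanoseconds(ns)`, and the hand-rolled `ms * 1000000` conversion disappears into `std::chrono::milliseconds(ms)`. Equivalent standalone arithmetic, with `std::chrono::system_clock` standing in for `ceph::real_clock`:

    #include <chrono>
    #include <cstdint>

    namespace sc = std::chrono;

    // nanosecond-resolution time point, like ceph::real_clock's
    using TimePoint = sc::time_point<sc::system_clock, sc::nanoseconds>;

    TimePoint schedule_after(bool have_stats, uint64_t avg_latency_ns,
                             uint64_t max_delay_ms) {
      auto t = sc::time_point_cast<sc::nanoseconds>(sc::system_clock::now());
      if (have_stats) {
        t += sc::nanoseconds(avg_latency_ns / 2);
      } else {
        t += sc::milliseconds(max_delay_ms);
      }
      return t;
    }
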
template <typename I>
void SimpleSchedulerObjectDispatch<I>::dispatch_all_delayed_requests() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
[this, object_no, dispatch_seq, start_time, ctx=*on_finish](int r) {
ctx->complete(r);
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_latency_stats && start_time != utime_t()) {
auto latency = ceph_clock_now() - start_time;
m_latency_stats->add(latency.to_nsec());
template <typename I>
void SimpleSchedulerObjectDispatch<I>::dispatch_delayed_requests(
uint64_t object_no) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
auto cct = m_image_ctx->cct;
auto it = m_requests.find(object_no);
template <typename I>
void SimpleSchedulerObjectDispatch<I>::dispatch_delayed_requests(
ObjectRequestsRef object_requests) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_no=" << object_requests->get_object_no() << ", "
template <typename I>
void SimpleSchedulerObjectDispatch<I>::schedule_dispatch_delayed_requests() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
auto cct = m_image_ctx->cct;
- Mutex::Locker timer_locker(*m_timer_lock);
+ std::lock_guard timer_locker{*m_timer_lock};
if (m_timer_task != nullptr) {
ldout(cct, 20) << "canceling task " << m_timer_task << dendl;
m_timer_task = new FunctionContext(
[this, object_no=object_requests->get_object_no()](int r) {
- ceph_assert(m_timer_lock->is_locked());
+ ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "running timer task " << m_timer_task << dendl;
m_image_ctx->op_work_queue->queue(
new FunctionContext(
[this, object_no](int r) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
dispatch_delayed_requests(object_no);
}), 0);
});
#ifndef CEPH_LIBRBD_IO_SIMPLE_SCHEDULER_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_IO_SIMPLE_SCHEDULER_OBJECT_DISPATCH_H
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "common/snap_types.h"
#include "include/interval_set.h"
#include "include/utime.h"
class ObjectRequests {
public:
+ using clock_t = ceph::real_clock;
+
ObjectRequests(uint64_t object_no) : m_object_no(object_no) {
}
return m_dispatch_seq;
}
- utime_t get_dispatch_time() const {
+ clock_t::time_point get_dispatch_time() const {
return m_dispatch_time;
}
- void set_scheduled_dispatch(const utime_t &dispatch_time) {
+ void set_scheduled_dispatch(const clock_t::time_point &dispatch_time) {
m_dispatch_time = dispatch_time;
}
bool is_scheduled_dispatch() const {
- return m_dispatch_time != utime_t();
+ return !clock_t::is_zero(m_dispatch_time);
}
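
The "is a dispatch scheduled?" sentinel changes accordingly: comparing against a default-constructed `utime_t` becomes `ceph::real_clock::is_zero()`, i.e. a comparison against the epoch-zero `time_point`. Standard-library equivalent of the check:

    #include <chrono>

    using TimePoint = std::chrono::system_clock::time_point;

    bool is_scheduled(const TimePoint& t) {
      // TimePoint{} is the epoch sentinel, mirroring
      // ceph::real_clock::is_zero(t)
      return t != TimePoint{};
    }
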
size_t delayed_requests_size() const {
void dispatch_delayed_requests(ImageCtxT *image_ctx,
LatencyStats *latency_stats,
- Mutex *latency_stats_lock);
+ ceph::mutex *latency_stats_lock);
private:
uint64_t m_object_no;
uint64_t m_dispatch_seq = 0;
- utime_t m_dispatch_time;
+ clock_t::time_point m_dispatch_time;
SnapContext m_snapc = {0, {}};
int m_op_flags = 0;
int m_object_dispatch_flags = 0;
ImageCtxT *m_image_ctx;
- Mutex m_lock;
+ ceph::mutex m_lock;
SafeTimer *m_timer;
- Mutex *m_timer_lock;
+ ceph::mutex *m_timer_lock;
uint64_t m_max_delay;
uint64_t m_dispatch_seq = 0;
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "librbd/ImageCtx.h"
#include "journal/Journaler.h"
#include "librbd/journal/Types.h"
bufferlist m_bl;
Journaler *m_journaler;
SafeTimer *m_timer;
- Mutex *m_timer_lock;
+ ceph::mutex *m_timer_lock;
int m_r_saved;
int64_t m_pool_id = -1;
template <typename I>
DemoteRequest<I>::DemoteRequest(I &image_ctx, Context *on_finish)
: m_image_ctx(image_ctx), m_on_finish(on_finish),
- m_lock("DemoteRequest::m_lock") {
+ m_lock(ceph::make_mutex("DemoteRequest::m_lock")) {
}
template <typename I>
#ifndef CEPH_LIBRBD_JOURNAL_DEMOTE_REQUEST_H
#define CEPH_LIBRBD_JOURNAL_DEMOTE_REQUEST_H
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "cls/journal/cls_journal_types.h"
#include "journal/Future.h"
#include "librbd/journal/Types.h"
Journaler *m_journaler = nullptr;
int m_ret_val = 0;
- Mutex m_lock;
+ ceph::mutex m_lock;
ImageClientMeta m_client_meta;
uint64_t m_tag_tid = 0;
TagData m_tag_data;
using util::C_DecodeTags;
template <typename I>
-OpenRequest<I>::OpenRequest(I *image_ctx, Journaler *journaler, Mutex *lock,
+OpenRequest<I>::OpenRequest(I *image_ctx, Journaler *journaler, ceph::mutex *lock,
journal::ImageClientMeta *client_meta,
uint64_t *tag_tid, journal::TagData *tag_data,
Context *on_finish)
m_tag_class = image_client_meta->tag_class;
{
- Mutex::Locker locker(*m_lock);
+ std::lock_guard locker{*m_lock};
*m_client_meta = *image_client_meta;
}
#ifndef CEPH_LIBRBD_JOURNAL_OPEN_REQUEST_H
#define CEPH_LIBRBD_JOURNAL_OPEN_REQUEST_H
+#include "common/ceph_mutex.h"
#include "include/int_types.h"
#include "librbd/journal/TypeTraits.h"
struct Context;
-struct Mutex;
namespace librbd {
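
Note the paired header change here: the old `Mutex` was a class and could be forward-declared, but `ceph::mutex` is a type alias (to `std::mutex` in release builds), and an alias cannot be forward-declared, so any header holding a `ceph::mutex *` must include `common/ceph_mutex.h` instead. The constraint in miniature:

    #include <mutex>

    namespace ceph {
    using mutex = std::mutex;   // release-build flavour of the alias
    // class mutex;             // would not compile: conflicts with the alias
    }

    struct OpenState {
      ceph::mutex *lock = nullptr;  // needs the alias's definition in scope
    };
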
typedef typename TypeTraits<ImageCtxT>::Journaler Journaler;
static OpenRequest* create(ImageCtxT *image_ctx, Journaler *journaler,
- Mutex *lock, journal::ImageClientMeta *client_meta,
+ ceph::mutex *lock, journal::ImageClientMeta *client_meta,
uint64_t *tag_tid, journal::TagData *tag_data,
Context *on_finish) {
return new OpenRequest(image_ctx, journaler, lock, client_meta, tag_tid,
tag_data, on_finish);
}
- OpenRequest(ImageCtxT *image_ctx, Journaler *journaler, Mutex *lock,
+ OpenRequest(ImageCtxT *image_ctx, Journaler *journaler, ceph::mutex *lock,
journal::ImageClientMeta *client_meta, uint64_t *tag_tid,
journal::TagData *tag_data, Context *on_finish);
ImageCtxT *m_image_ctx;
Journaler *m_journaler;
- Mutex *m_lock;
+ ceph::mutex *m_lock;
journal::ImageClientMeta *m_client_meta;
uint64_t *m_tag_tid;
journal::TagData *m_tag_data;
template <typename I>
PromoteRequest<I>::PromoteRequest(I *image_ctx, bool force, Context *on_finish)
: m_image_ctx(image_ctx), m_force(force), m_on_finish(on_finish),
- m_lock("PromoteRequest::m_lock") {
+ m_lock(ceph::make_mutex("PromoteRequest::m_lock")) {
}
template <typename I>
#define CEPH_LIBRBD_JOURNAL_PROMOTE_REQUEST_H
#include "include/int_types.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "cls/journal/cls_journal_types.h"
#include "journal/Future.h"
#include "librbd/journal/Types.h"
Journaler *m_journaler = nullptr;
int m_ret_val = 0;
- Mutex m_lock;
+ ceph::mutex m_lock;
ImageClientMeta m_client_meta;
uint64_t m_tag_tid = 0;
TagData m_tag_data;
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
-#include "common/Mutex.h"
#include "librbd/ImageCtx.h"
#include "journal/Journaler.h"
#include "librbd/journal/TypeTraits.h"
CephContext *m_cct;
Journaler *m_journaler;
SafeTimer *m_timer;
- Mutex *m_timer_lock;
+ ceph::mutex *m_timer_lock;
int m_r_saved;
void stat_journal();
}
ldout(cct, 20) << ": ExecuteOp::" << __func__ << dendl;
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
if (image_ctx.exclusive_lock == nullptr ||
!image_ctx.exclusive_lock->accept_ops()) {
template <typename I>
Replay<I>::Replay(I &image_ctx)
- : m_image_ctx(image_ctx), m_lock("Replay<I>::m_lock") {
+ : m_image_ctx(image_ctx) {
}
template <typename I>
on_ready = util::create_async_context_callback(m_image_ctx, on_ready);
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ std::shared_lock owner_lock{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock == nullptr ||
!m_image_ctx.exclusive_lock->accept_ops()) {
ldout(cct, 5) << ": lost exclusive lock -- skipping event" << dendl;
m_image_ctx, on_finish);
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
// safely commit any remaining AIO modify operations
if ((m_in_flight_aio_flush + m_in_flight_aio_modify) != 0) {
// execute the following outside of lock scope
if (flush_comp != nullptr) {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
io::FLUSH_SOURCE_INTERNAL, {});
}
void Replay<I>::flush(Context *on_finish) {
io::AioCompletion *aio_comp;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
aio_comp = create_aio_flush_completion(
util::create_async_context_callback(m_image_ctx, on_finish));
if (aio_comp == nullptr) {
}
}
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
io::ImageRequest<I>::aio_flush(&m_image_ctx, aio_comp,
io::FLUSH_SOURCE_INTERNAL, {});
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": op_tid=" << op_tid << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto op_it = m_op_events.find(op_tid);
ceph_assert(op_it != m_op_events.end());
}
if (flush_required) {
- m_lock.Lock();
+ m_lock.lock();
auto flush_comp = create_aio_flush_completion(nullptr);
- m_lock.Unlock();
+ m_lock.unlock();
if (flush_comp != nullptr) {
io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
}
if (flush_required) {
- m_lock.Lock();
+ m_lock.lock();
auto flush_comp = create_aio_flush_completion(nullptr);
- m_lock.Unlock();
+ m_lock.unlock();
if (flush_comp != nullptr) {
io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
io::AioCompletion *aio_comp;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
aio_comp = create_aio_flush_completion(on_safe);
}
}
if (flush_required) {
- m_lock.Lock();
+ m_lock.lock();
auto flush_comp = create_aio_flush_completion(nullptr);
- m_lock.Unlock();
+ m_lock.unlock();
if (flush_comp != nullptr) {
io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
}
if (flush_required) {
- m_lock.Lock();
+ m_lock.lock();
auto flush_comp = create_aio_flush_completion(nullptr);
- m_lock.Unlock();
+ m_lock.unlock();
io::ImageRequest<I>::aio_flush(&m_image_ctx, flush_comp,
io::FLUSH_SOURCE_INTERNAL, {});
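
The repeated `m_lock.lock()` / `m_lock.unlock()` bracketing in these replay paths keeps only the completion creation under the lock and issues the flush after releasing it, so image IO is never re-entered while `m_lock` is held. Where a block scope is available, the same code uses a guard instead, as in this sketch:

    #include <mutex>

    std::mutex m_lock;
    int m_in_flight = 0;  // protected by m_lock

    void flush_if_required(bool flush_required) {
      bool issue_flush = false;
      if (flush_required) {
        std::lock_guard locker{m_lock};    // same effect as lock()/unlock()
        issue_flush = (m_in_flight == 0);  // decide under the lock
      }
      if (issue_flush) {
        // issue the flush outside the lock to avoid re-entrancy
      }
    }
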
Context *on_op_complete = nullptr;
Context *on_op_finish_event = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto op_it = m_op_events.find(event.op_tid);
if (op_it == m_op_events.end()) {
ldout(cct, 10) << ": unable to locate associated op: assuming previously "
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": Snap create event" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
OpEvent *op_event;
Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
on_safe, &op_event);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": Snap remove event" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
OpEvent *op_event;
Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
on_safe, &op_event);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": Snap rename event" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
OpEvent *op_event;
Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
on_safe, &op_event);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": Snap protect event" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
OpEvent *op_event;
Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
on_safe, &op_event);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": Snap unprotect event" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
OpEvent *op_event;
Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
on_safe, &op_event);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": Snap rollback start event" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
OpEvent *op_event;
Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
on_safe, &op_event);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": Rename event" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
OpEvent *op_event;
Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
on_safe, &op_event);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": Resize start event" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
OpEvent *op_event;
Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
on_safe, &op_event);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": Flatten start event" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
OpEvent *op_event;
Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
on_safe, &op_event);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": Snap limit event" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
OpEvent *op_event;
Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
on_safe, &op_event);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": Update features event" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
OpEvent *op_event;
Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
on_safe, &op_event);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": Metadata set event" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
OpEvent *op_event;
Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
on_safe, &op_event);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": Metadata remove event" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
OpEvent *op_event;
Context *on_op_complete = create_op_context_callback(event.op_tid, on_ready,
on_safe, &op_event);
template <typename I>
void Replay<I>::handle_aio_modify_complete(Context *on_ready, Context *on_safe,
int r, std::set<int> &filters) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << ": on_ready=" << on_ready << ", "
<< "on_safe=" << on_safe << ", r=" << r << dendl;
Context *on_aio_ready = nullptr;
Context *on_flush = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_in_flight_aio_flush > 0);
ceph_assert(m_in_flight_aio_modify >= on_safe_ctxs.size());
--m_in_flight_aio_flush;
return nullptr;
}
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_op_events.count(op_tid) != 0) {
lderr(cct) << ": duplicate op tid detected: " << op_tid << dendl;
OpEvent op_event;
bool shutting_down = false;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto op_it = m_op_events.find(op_tid);
ceph_assert(op_it != m_op_events.end());
// dropped -- handle if pending
Context *on_flush = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_in_flight_op_events > 0);
--m_in_flight_op_events;
if (m_in_flight_op_events == 0 &&
io::aio_type_t aio_type,
bool *flush_required,
std::set<int> &&filters) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
CephContext *cct = m_image_ctx.cct;
ceph_assert(m_on_aio_ready == nullptr);
template <typename I>
io::AioCompletion *Replay<I>::create_aio_flush_completion(Context *on_safe) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
CephContext *cct = m_image_ctx.cct;
if (m_shut_down) {
bool Replay<I>::clipped_io(uint64_t image_offset, io::AioCompletion *aio_comp) {
CephContext *cct = m_image_ctx.cct;
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
size_t image_size = m_image_ctx.size;
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
if (image_offset >= image_size) {
// rbd-mirror image sync might race an IO event w/ associated resize between
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/Context.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "librbd/io/Types.h"
#include "librbd/journal/Types.h"
#include <boost/variant.hpp>
ImageCtxT &m_image_ctx;
- Mutex m_lock;
+ ceph::mutex m_lock = ceph::make_mutex("Replay<I>::m_lock");
uint64_t m_in_flight_aio_flush = 0;
uint64_t m_in_flight_aio_modify = 0;
#include "include/buffer.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
-#include "common/Mutex.h"
#include "librbd/journal/TypeTraits.h"
#include <string>
return r;
}
- Mutex::Locker locker(*lock);
+ std::lock_guard locker{*lock};
*tag_tid = tag.tid;
auto data_it = tag.data.cbegin();
return -ENOENT;
}
- Mutex::Locker locker(*lock);
+ std::lock_guard locker{*lock};
*tag_tid = tags.back().tid;
auto data_it = tags.back().data.cbegin();
r = C_DecodeTag::decode(&data_it, tag_data);
#include <list>
struct CephContext;
-struct Mutex;
namespace librbd {
namespace journal {
struct C_DecodeTag : public Context {
CephContext *cct;
- Mutex *lock;
+ ceph::mutex *lock;
uint64_t *tag_tid;
TagData *tag_data;
Context *on_finish;
cls::journal::Tag tag;
- C_DecodeTag(CephContext *cct, Mutex *lock, uint64_t *tag_tid,
+ C_DecodeTag(CephContext *cct, ceph::mutex *lock, uint64_t *tag_tid,
TagData *tag_data, Context *on_finish)
: cct(cct), lock(lock), tag_tid(tag_tid), tag_data(tag_data),
on_finish(on_finish) {
typedef std::list<cls::journal::Tag> Tags;
CephContext *cct;
- Mutex *lock;
+ ceph::mutex *lock;
uint64_t *tag_tid;
TagData *tag_data;
Context *on_finish;
Tags tags;
- C_DecodeTags(CephContext *cct, Mutex *lock, uint64_t *tag_tid,
+ C_DecodeTags(CephContext *cct, ceph::mutex *lock, uint64_t *tag_tid,
TagData *tag_data, Context *on_finish)
: cct(cct), lock(lock), tag_tid(tag_tid), tag_data(tag_data),
on_finish(on_finish) {
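A note on the include changes in these headers: the old code could get away with a `struct Mutex;` forward declaration because `Mutex` was a class, but `ceph::mutex` is a type alias (in release builds it resolves to a std mutex type), and an alias cannot be forward-declared. Hence the pattern of dropping the forward declaration and including `common/ceph_mutex.h` directly, sketched below; `C_Example` is illustrative:

// struct Mutex;                    // old forward declaration; no equivalent:
// namespace ceph { class mutex; }  // ill-formed where ceph::mutex is an alias

#include "common/ceph_mutex.h"      // pull in the real definition instead

struct C_Example {
  ceph::mutex *lock;                // pointer member, as in C_DecodeTag above
};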
tracepoint(librbd, get_access_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
{
- RWLock::RLocker timestamp_locker(ictx->timestamp_lock);
+ std::shared_lock timestamp_locker{ictx->timestamp_lock};
utime_t time = ictx->get_access_timestamp();
time.to_timespec(timestamp);
}
tracepoint(librbd, get_modify_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
{
- RWLock::RLocker timestamp_locker(ictx->timestamp_lock);
+ std::shared_lock timestamp_locker{ictx->timestamp_lock};
utime_t time = ictx->get_modify_timestamp();
time.to_timespec(timestamp);
}
void DemoteRequest<I>::acquire_lock() {
CephContext *cct = m_image_ctx.cct;
- m_image_ctx.owner_lock.get_read();
+ m_image_ctx.owner_lock.lock_shared();
if (m_image_ctx.exclusive_lock == nullptr) {
- m_image_ctx.owner_lock.put_read();
+ m_image_ctx.owner_lock.unlock_shared();
lderr(cct) << "exclusive lock is not active" << dendl;
finish(-EINVAL);
return;
m_blocked_requests = true;
if (m_image_ctx.exclusive_lock->is_lock_owner()) {
- m_image_ctx.owner_lock.put_read();
+ m_image_ctx.owner_lock.unlock_shared();
demote();
return;
}
auto ctx = create_context_callback<
DemoteRequest<I>, &DemoteRequest<I>::handle_acquire_lock>(this);
m_image_ctx.exclusive_lock->acquire_lock(ctx);
- m_image_ctx.owner_lock.put_read();
+ m_image_ctx.owner_lock.unlock_shared();
}
template <typename I>
return;
}
- m_image_ctx.owner_lock.get_read();
+ m_image_ctx.owner_lock.lock_shared();
if (m_image_ctx.exclusive_lock != nullptr &&
!m_image_ctx.exclusive_lock->is_lock_owner()) {
r = m_image_ctx.exclusive_lock->get_unlocked_op_error();
- m_image_ctx.owner_lock.put_read();
+ m_image_ctx.owner_lock.unlock_shared();
lderr(cct) << "failed to acquire exclusive lock" << dendl;
finish(r);
return;
}
- m_image_ctx.owner_lock.put_read();
+ m_image_ctx.owner_lock.unlock_shared();
demote();
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
- m_image_ctx.owner_lock.get_read();
+ m_image_ctx.owner_lock.lock_shared();
if (m_image_ctx.exclusive_lock == nullptr) {
- m_image_ctx.owner_lock.put_read();
+ m_image_ctx.owner_lock.unlock_shared();
finish(0);
return;
}
auto ctx = create_context_callback<
DemoteRequest<I>, &DemoteRequest<I>::handle_release_lock>(this);
m_image_ctx.exclusive_lock->release_lock(ctx);
- m_image_ctx.owner_lock.put_read();
+ m_image_ctx.owner_lock.unlock_shared();
}
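The `RWLock` conversions follow the same mechanical shape: `RWLock::RLocker` becomes `std::shared_lock`, `RWLock::WLocker` becomes `std::unique_lock`, and the manual `get_read()`/`put_read()` pairs in `DemoteRequest` above become `lock_shared()`/`unlock_shared()`. A minimal sketch, assuming `ceph::shared_mutex`/`ceph::make_shared_mutex` from `common/ceph_mutex.h`; names are illustrative:

#include <mutex>
#include <shared_mutex>
#include "common/ceph_mutex.h"

struct Example {
  ceph::shared_mutex m_rwlock =
    ceph::make_shared_mutex("Example::m_rwlock");

  void read_scope() {
    std::shared_lock locker{m_rwlock};  // replaces RWLock::RLocker
  }

  void write_scope() {
    std::unique_lock locker{m_rwlock};  // replaces RWLock::WLocker
  }

  void manual_read() {
    m_rwlock.lock_shared();             // replaces get_read()
    // ... early returns must drop the lock explicitly, as above ...
    m_rwlock.unlock_shared();           // replaces put_read()
  }
};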
template <typename I>
}
{
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_blocked_requests && m_image_ctx.exclusive_lock != nullptr) {
m_image_ctx.exclusive_lock->unblock_requests();
}
DisableRequest<I>::DisableRequest(I *image_ctx, bool force, bool remove,
Context *on_finish)
: m_image_ctx(image_ctx), m_force(force), m_remove(remove),
- m_on_finish(on_finish), m_lock("mirror::DisableRequest::m_lock") {
+ m_on_finish(on_finish) {
}
template <typename I>
return m_on_finish;
}
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_current_ops.empty());
ldout(cct, 10) << this << " " << __func__ << ": client_id=" << client_id
<< ", snap_name=" << snap_name << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
m_current_ops[client_id]++;
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_current_ops[client_id] > 0);
m_current_ops[client_id]--;
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_current_ops[client_id] == 0);
Context *ctx = create_context_callback(
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_current_ops[client_id] == 0);
m_current_ops.erase(client_id);
#define CEPH_LIBRBD_MIRROR_DISABLE_REQUEST_H
#include "include/buffer.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "cls/journal/cls_journal_types.h"
#include "cls/rbd/cls_rbd_types.h"
#include <map>
std::map<std::string, int> m_ret;
std::map<std::string, int> m_current_ops;
int m_error_result = 0;
- mutable Mutex m_lock;
+ mutable ceph::mutex m_lock =
+ ceph::make_mutex("mirror::DisableRequest::m_lock");
void send_get_mirror_image();
Context *handle_get_mirror_image(int *result);
uint64_t max_size = m_image_ctx->size;
{
- RWLock::WLocker image_locker(m_image_ctx->image_lock);
+ std::unique_lock image_locker{m_image_ctx->image_lock};
m_snap_ids.push_back(CEPH_NOSNAP);
for (auto it : m_image_ctx->snap_info) {
max_size = std::max(max_size, it.second.size);
#define CEPH_LIBRBD_OBJECT_MAP_CREATE_REQUEST_H
#include "include/buffer.h"
-#include "common/Mutex.h"
#include <map>
#include <string>
template <typename I>
void InvalidateRequest<I>::send() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
- ceph_assert(image_ctx.image_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
+ ceph_assert(ceph_mutex_is_wlocked(image_ctx.image_lock));
uint64_t snap_flags;
int r = image_ctx.get_flags(m_snap_id, &snap_flags);
namespace object_map {
template <typename I>
-RefreshRequest<I>::RefreshRequest(I &image_ctx, RWLock* object_map_lock,
+RefreshRequest<I>::RefreshRequest(I &image_ctx, ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map,
uint64_t snap_id, Context *on_finish)
: m_image_ctx(image_ctx), m_object_map_lock(object_map_lock),
template <typename I>
void RefreshRequest<I>::send() {
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
m_object_count = Striper::get_num_objects(
m_image_ctx.layout, m_image_ctx.get_image_size(m_snap_id));
}
void RefreshRequest<I>::apply() {
uint64_t num_objs;
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
num_objs = Striper::get_num_objects(
m_image_ctx.layout, m_image_ctx.get_image_size(m_snap_id));
}
ceph_assert(m_on_disk_object_map.size() >= num_objs);
- RWLock::WLocker object_map_locker(*m_object_map_lock);
+ std::unique_lock object_map_locker{*m_object_map_lock};
*m_object_map = m_on_disk_object_map;
}
InvalidateRequest<I> *req = InvalidateRequest<I>::create(
m_image_ctx, m_snap_id, true, ctx);
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
+ std::unique_lock image_locker{m_image_ctx.image_lock};
req->send();
}
InvalidateRequest<I> *req = InvalidateRequest<I>::create(
m_image_ctx, m_snap_id, true, ctx);
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
+ std::unique_lock image_locker{m_image_ctx.image_lock};
req->send();
}
m_image_ctx, m_snap_id, false, ctx);
lderr(cct) << "object map too large: " << m_object_count << dendl;
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
+ std::unique_lock image_locker{m_image_ctx.image_lock};
req->send();
}
*ret_val = -EFBIG;
}
- RWLock::WLocker object_map_locker(*m_object_map_lock);
+ std::unique_lock object_map_locker{*m_object_map_lock};
m_object_map->clear();
return m_on_finish;
}
#include "include/int_types.h"
#include "include/buffer.h"
#include "common/bit_vector.hpp"
+#include "common/ceph_mutex.h"
class Context;
-class RWLock;
template <typename ImageCtxT = ImageCtx>
class RefreshRequest {
public:
- static RefreshRequest *create(ImageCtxT &image_ctx, RWLock* object_map_lock,
+ static RefreshRequest *create(ImageCtxT &image_ctx,
+ ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map,
uint64_t snap_id, Context *on_finish) {
return new RefreshRequest(image_ctx, object_map_lock, object_map, snap_id,
on_finish);
}
- RefreshRequest(ImageCtxT &image_ctx, RWLock* object_map_lock,
+ RefreshRequest(ImageCtxT &image_ctx, ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map, uint64_t snap_id,
Context *on_finish);
*/
ImageCtxT &m_image_ctx;
- RWLock* m_object_map_lock;
+ ceph::shared_mutex* m_object_map_lock;
ceph::BitVector<2> *m_object_map;
uint64_t m_snap_id;
Context *m_on_finish;
template <typename I>
RemoveRequest<I>::RemoveRequest(I *image_ctx, Context *on_finish)
- : m_image_ctx(image_ctx), m_on_finish(on_finish),
- m_lock("object_map::RemoveRequest::m_lock") {
+ : m_image_ctx(image_ctx), m_on_finish(on_finish) {
}
template <typename I>
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
- RWLock::WLocker image_locker(m_image_ctx->image_lock);
+ std::unique_lock image_locker{m_image_ctx->image_lock};
std::vector<uint64_t> snap_ids;
snap_ids.push_back(CEPH_NOSNAP);
for (auto it : m_image_ctx->snap_info) {
snap_ids.push_back(it.first);
}
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_ref_counter == 0);
for (auto snap_id : snap_ids) {
ldout(cct, 20) << __func__ << ": r=" << *result << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_ref_counter > 0);
m_ref_counter--;
#define CEPH_LIBRBD_OBJECT_MAP_REMOVE_REQUEST_H
#include "include/buffer.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include <map>
#include <string>
int m_error_result = 0;
int m_ref_counter = 0;
- mutable Mutex m_lock;
+ mutable ceph::mutex m_lock =
+ ceph::make_mutex("object_map::RemoveRequest::m_lock");
void send_remove_object_map();
Context *handle_remove_object_map(int *result);
m_state = STATE_INVALIDATE;
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
+ std::unique_lock image_locker{m_image_ctx.image_lock};
InvalidateRequest<> *req = new InvalidateRequest<>(m_image_ctx, m_snap_id,
true,
create_callback_context());
void ResizeRequest::send() {
CephContext *cct = m_image_ctx.cct;
- RWLock::WLocker l(*m_object_map_lock);
+ std::unique_lock l{*m_object_map_lock};
m_num_objs = Striper::get_num_objects(m_image_ctx.layout, m_new_size);
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
ldout(cct, 5) << this << " resizing in-memory object map: "
<< m_num_objs << dendl;
- RWLock::WLocker object_map_locker(*m_object_map_lock);
+ std::unique_lock object_map_locker{*m_object_map_lock};
resize(m_object_map, m_num_objs, m_default_object_state);
}
class ResizeRequest : public Request {
public:
- ResizeRequest(ImageCtx &image_ctx, RWLock *object_map_lock,
+ ResizeRequest(ImageCtx &image_ctx, ceph::shared_mutex *object_map_lock,
ceph::BitVector<2> *object_map, uint64_t snap_id,
uint64_t new_size, uint8_t default_object_state,
Context *on_finish)
void finish_request() override;
private:
- RWLock* m_object_map_lock;
+ ceph::shared_mutex* m_object_map_lock;
ceph::BitVector<2> *m_object_map;
uint64_t m_num_objs;
uint64_t m_new_size;
return Request::should_complete(r);
}
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
bool finished = false;
switch (m_state) {
case STATE_READ_MAP:
}
void SnapshotCreateRequest::send_read_map() {
- ceph_assert(m_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
CephContext *cct = m_image_ctx.cct;
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, CEPH_NOSNAP));
}
bool SnapshotCreateRequest::send_add_snapshot() {
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) == 0) {
return true;
}
}
void SnapshotCreateRequest::update_object_map() {
- RWLock::WLocker object_map_locker(*m_object_map_lock);
+ std::unique_lock object_map_locker{*m_object_map_lock};
auto it = m_object_map.begin();
auto end_it = m_object_map.end();
STATE_ADD_SNAPSHOT
};
- SnapshotCreateRequest(ImageCtx &image_ctx, RWLock* object_map_lock,
+ SnapshotCreateRequest(ImageCtx &image_ctx, ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map, uint64_t snap_id,
Context *on_finish)
: Request(image_ctx, snap_id, on_finish),
bool should_complete(int r) override;
private:
- RWLock* m_object_map_lock;
+ ceph::shared_mutex* m_object_map_lock;
ceph::BitVector<2> &m_object_map;
State m_state = STATE_READ_MAP;
namespace object_map {
void SnapshotRemoveRequest::send() {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
- ceph_assert(m_image_ctx.image_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+ ceph_assert(ceph_mutex_is_wlocked(m_image_ctx.image_lock));
if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) {
int r = m_image_ctx.get_flags(m_snap_id, &m_flags);
lderr(cct) << "failed to load object map " << oid << ": "
<< cpp_strerror(r) << dendl;
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
+ std::unique_lock image_locker{m_image_ctx.image_lock};
invalidate_next_map();
return;
}
if ((m_flags & RBD_FLAG_OBJECT_MAP_INVALID) != 0) {
// snapshot object map exists on disk but is invalid. cannot clean fast-diff
// on next snapshot if current snapshot was invalid.
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
+ std::unique_lock image_locker{m_image_ctx.image_lock};
invalidate_next_map();
return;
}
lderr(cct) << "failed to remove object map snapshot " << oid << ": "
<< cpp_strerror(r) << dendl;
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
+ std::unique_lock image_locker{m_image_ctx.image_lock};
invalidate_next_map();
return;
}
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
update_object_map();
remove_map();
}
void SnapshotRemoveRequest::invalidate_next_map() {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
- ceph_assert(m_image_ctx.image_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+ ceph_assert(ceph_mutex_is_wlocked(m_image_ctx.image_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << dendl;
}
void SnapshotRemoveRequest::compute_next_snap_id() {
- ceph_assert(m_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
m_next_snap_id = CEPH_NOSNAP;
std::map<librados::snap_t, SnapInfo>::const_iterator it =
}
void SnapshotRemoveRequest::update_object_map() {
- assert(m_image_ctx.image_lock.is_locked());
- RWLock::WLocker object_map_locker(*m_object_map_lock);
+ assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
+ std::unique_lock object_map_locker{*m_object_map_lock};
if (m_next_snap_id == m_image_ctx.snap_id && m_next_snap_id == CEPH_NOSNAP) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << dendl;
#include "common/bit_vector.hpp"
#include "librbd/AsyncRequest.h"
-class RWLock;
-
namespace librbd {
namespace object_map {
* otherwise, the state machine proceeds to remove the object map.
*/
- SnapshotRemoveRequest(ImageCtx &image_ctx, RWLock* object_map_lock,
+ SnapshotRemoveRequest(ImageCtx &image_ctx, ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map, uint64_t snap_id,
Context *on_finish)
: AsyncRequest(image_ctx, on_finish),
}
private:
- RWLock* m_object_map_lock;
+ ceph::shared_mutex* m_object_map_lock;
ceph::BitVector<2> &m_object_map;
uint64_t m_snap_id;
uint64_t m_next_snap_id;
}
void SnapshotRollbackRequest::send_write_map() {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
CephContext *cct = m_image_ctx.cct;
std::string snap_oid(ObjectMap<>::object_map_name(m_image_ctx.id,
}
void SnapshotRollbackRequest::send_invalidate_map() {
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock owner_locker{m_image_ctx.owner_lock};
+ std::unique_lock image_locker{m_image_ctx.image_lock};
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
template <typename I>
void UpdateRequest<I>::update_object_map() {
- ceph_assert(m_image_ctx.image_lock.is_locked());
- ceph_assert(m_object_map_lock->is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
+ ceph_assert(ceph_mutex_is_locked(*m_object_map_lock));
CephContext *cct = m_image_ctx.cct;
// break very large requests into manageable batches
}
{
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
- RWLock::WLocker object_map_locker(*m_object_map_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
+ std::unique_lock object_map_locker{*m_object_map_lock};
update_in_memory_object_map();
if (m_update_end_object_no < m_end_object_no) {
template <typename I>
void UpdateRequest<I>::update_in_memory_object_map() {
- ceph_assert(m_image_ctx.image_lock.is_locked());
- ceph_assert(m_object_map_lock->is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
+ ceph_assert(ceph_mutex_is_locked(*m_object_map_lock));
// rebuilding the object map might update on-disk only
if (m_snap_id == m_image_ctx.snap_id) {
#include <boost/optional.hpp>
class Context;
-class RWLock;
namespace librbd {
class UpdateRequest : public Request {
public:
static UpdateRequest *create(ImageCtx &image_ctx,
- RWLock* object_map_lock,
+ ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map,
uint64_t snap_id, uint64_t start_object_no,
uint64_t end_object_no, uint8_t new_state,
on_finish);
}
- UpdateRequest(ImageCtx &image_ctx, RWLock* object_map_lock,
+ UpdateRequest(ImageCtx &image_ctx, ceph::shared_mutex* object_map_lock,
ceph::BitVector<2> *object_map, uint64_t snap_id,
uint64_t start_object_no, uint64_t end_object_no,
uint8_t new_state,
* @endverbatim
*/
- RWLock* m_object_map_lock;
+ ceph::shared_mutex* m_object_map_lock;
ceph::BitVector<2> &m_object_map;
uint64_t m_start_object_no;
uint64_t m_end_object_no;
void DisableFeaturesRequest<I>::send_op() {
I &image_ctx = this->m_image_ctx;
CephContext *cct = image_ctx.cct;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
ldout(cct, 20) << this << " " << __func__ << ": features=" << m_features
<< dendl;
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- RWLock::WLocker locker(image_ctx.owner_lock);
+ std::unique_lock locker{image_ctx.owner_lock};
image_ctx.io_work_queue->block_writes(create_context_callback<
DisableFeaturesRequest<I>,
&DisableFeaturesRequest<I>::handle_block_writes>(this));
m_writes_blocked = true;
{
- RWLock::WLocker locker(image_ctx.owner_lock);
+ std::unique_lock locker{image_ctx.owner_lock};
// avoid accepting new requests from peers while we manipulate
// the image features
if (image_ctx.exclusive_lock != nullptr &&
&DisableFeaturesRequest<I>::handle_acquire_exclusive_lock>(this);
{
- RWLock::WLocker locker(image_ctx.owner_lock);
+ std::unique_lock locker{image_ctx.owner_lock};
// if disabling features w/ exclusive lock supported, we need to
// acquire the lock to temporarily block IO against the image
if (image_ctx.exclusive_lock != nullptr &&
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << *result << dendl;
- image_ctx.owner_lock.get_read();
+ image_ctx.owner_lock.lock_shared();
if (*result < 0) {
lderr(cct) << "failed to lock image: " << cpp_strerror(*result) << dendl;
- image_ctx.owner_lock.put_read();
+ image_ctx.owner_lock.unlock_shared();
return handle_finish(*result);
} else if (image_ctx.exclusive_lock != nullptr &&
!image_ctx.exclusive_lock->is_lock_owner()) {
lderr(cct) << "failed to acquire exclusive lock" << dendl;
*result = image_ctx.exclusive_lock->get_unlocked_op_error();
- image_ctx.owner_lock.put_read();
+ image_ctx.owner_lock.unlock_shared();
return handle_finish(*result);
}
m_disable_flags |= RBD_FLAG_OBJECT_MAP_INVALID;
}
} while (false);
- image_ctx.owner_lock.put_read();
+ image_ctx.owner_lock.unlock_shared();
if (*result < 0) {
return handle_finish(*result);
CephContext *cct = image_ctx.cct;
{
- RWLock::WLocker locker(image_ctx.owner_lock);
+ std::unique_lock locker{image_ctx.owner_lock};
if (image_ctx.journal != nullptr) {
ldout(cct, 20) << this << " " << __func__ << dendl;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
{
- RWLock::WLocker locker(image_ctx.owner_lock);
+ std::unique_lock locker{image_ctx.owner_lock};
if (image_ctx.exclusive_lock != nullptr && m_requests_blocked) {
image_ctx.exclusive_lock->unblock_requests();
}
void EnableFeaturesRequest<I>::send_op() {
I &image_ctx = this->m_image_ctx;
CephContext *cct = image_ctx.cct;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
ldout(cct, 20) << this << " " << __func__ << ": features=" << m_features
<< dendl;
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
- RWLock::WLocker locker(image_ctx.owner_lock);
+ std::unique_lock locker{image_ctx.owner_lock};
image_ctx.io_work_queue->block_writes(create_context_callback<
EnableFeaturesRequest<I>,
&EnableFeaturesRequest<I>::handle_block_writes>(this));
bool create_journal = false;
do {
- RWLock::WLocker locker(image_ctx.owner_lock);
+ std::unique_lock locker{image_ctx.owner_lock};
// avoid accepting new requests from peers while we manipulate
// the image features
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
{
- RWLock::WLocker locker(image_ctx.owner_lock);
+ std::unique_lock locker{image_ctx.owner_lock};
if (image_ctx.exclusive_lock != nullptr && m_requests_blocked) {
image_ctx.exclusive_lock->unblock_requests();
int send() override {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
if (image_ctx.exclusive_lock != nullptr &&
}
{
- RWLock::RLocker image_lock(image_ctx.image_lock);
+ std::shared_lock image_lock{image_ctx.image_lock};
if (image_ctx.object_map != nullptr &&
!image_ctx.object_map->object_may_not_exist(m_object_no)) {
// can skip because the object already exists
template <typename I>
void FlattenRequest<I>::flatten_objects() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << dendl;
- assert(image_ctx.owner_lock.is_locked());
+ assert(ceph_mutex_is_locked(image_ctx.owner_lock));
auto ctx = create_context_callback<
FlattenRequest<I>,
&FlattenRequest<I>::handle_flatten_objects>(this);
CephContext *cct = image_ctx.cct;
// should have been canceled prior to releasing lock
- image_ctx.owner_lock.get_read();
+ image_ctx.owner_lock.lock_shared();
ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
// if there are no snaps, remove from the children object as well
// (if snapshots remain, they have their own parent info, and the child
// will be removed when the last snap goes away)
- image_ctx.image_lock.get_read();
+ image_ctx.image_lock.lock_shared();
if ((image_ctx.features & RBD_FEATURE_DEEP_FLATTEN) == 0 &&
!image_ctx.snaps.empty()) {
- image_ctx.image_lock.put_read();
- image_ctx.owner_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
+ image_ctx.owner_lock.unlock_shared();
detach_parent();
return;
}
- image_ctx.image_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
ldout(cct, 5) << dendl;
auto ctx = create_context_callback<
&FlattenRequest<I>::handle_detach_child>(this);
auto req = image::DetachChildRequest<I>::create(image_ctx, ctx);
req->send();
- image_ctx.owner_lock.put_read();
+ image_ctx.owner_lock.unlock_shared();
}
template <typename I>
ldout(cct, 5) << dendl;
// should have been canceled prior to releasing lock
- image_ctx.owner_lock.get_read();
+ image_ctx.owner_lock.lock_shared();
ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
// stop early if the parent went away - it just means
// another flatten finished first, so this one is useless.
- image_ctx.image_lock.get_read();
+ image_ctx.image_lock.lock_shared();
if (!image_ctx.parent) {
ldout(cct, 5) << "image already flattened" << dendl;
- image_ctx.image_lock.put_read();
- image_ctx.owner_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
+ image_ctx.owner_lock.unlock_shared();
this->complete(0);
return;
}
- image_ctx.image_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
// remove parent from this (base) image
auto ctx = create_context_callback<
&FlattenRequest<I>::handle_detach_parent>(this);
auto req = image::DetachParentRequest<I>::create(image_ctx, ctx);
req->send();
- image_ctx.owner_lock.put_read();
+ image_ctx.owner_lock.unlock_shared();
}
template <typename I>
template <typename I>
void MetadataRemoveRequest<I>::send_metadata_remove() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
template <typename I>
void MetadataSetRequest<I>::send_metadata_set() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
int send() override {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
if (image_ctx.exclusive_lock != nullptr &&
void start_async_op() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 10) << dendl;
return;
}
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
start_async_op();
}
bool is_within_overlap_bounds() {
I &image_ctx = this->m_image_ctx;
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
auto overlap = std::min(image_ctx.size, image_ctx.migration_info.overlap);
return overlap > 0 &&
void migrate_object() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
auto ctx = create_context_callback<
template <typename I>
void MigrateRequest<I>::send_op() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 10) << dendl;
void MigrateRequest<I>::migrate_objects() {
I &image_ctx = this->m_image_ctx;
CephContext *cct = image_ctx.cct;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
uint64_t overlap_objects = get_num_overlap_objects();
CephContext *cct = image_ctx.cct;
ldout(cct, 10) << dendl;
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
auto overlap = image_ctx.migration_info.overlap;
void send_list_snaps() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
ldout(image_ctx.cct, 5) << m_oid
<< " C_VerifyObjectCallback::send_list_snaps"
<< dendl;
uint8_t get_object_state() {
I &image_ctx = this->m_image_ctx;
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
for (std::vector<librados::clone_info_t>::const_iterator r =
m_snap_set.clones.begin(); r != m_snap_set.clones.end(); ++r) {
librados::snap_t from_snap_id;
uint64_t next_valid_snap_id(uint64_t snap_id) {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
std::map<librados::snap_t, SnapInfo>::iterator it =
image_ctx.snap_info.lower_bound(snap_id);
bool object_map_action(uint8_t new_state) {
I &image_ctx = this->m_image_ctx;
CephContext *cct = image_ctx.cct;
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
// should have been canceled prior to releasing lock
ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
ceph_assert(image_ctx.object_map != nullptr);
uint8_t state = (*image_ctx.object_map)[m_object_no];
<< cpp_strerror(r) << dendl;
}
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ std::shared_lock owner_lock{m_image_ctx.owner_lock};
switch (m_state) {
case STATE_VERIFY_OBJECTS:
if (m_invalidate.test_and_set()) {
template <typename I>
void ObjectMapIterateRequest<I>::send_verify_objects() {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
uint64_t snap_id;
uint64_t num_objects;
{
- RWLock::RLocker l(m_image_ctx.image_lock);
+ std::shared_lock l{m_image_ctx.image_lock};
snap_id = m_image_ctx.snap_id;
num_objects = Striper::get_num_objects(m_image_ctx.layout,
m_image_ctx.get_image_size(snap_id));
template <typename I>
uint64_t ObjectMapIterateRequest<I>::get_image_size() const {
- ceph_assert(m_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
if (m_image_ctx.snap_id == CEPH_NOSNAP) {
if (!m_image_ctx.resize_reqs.empty()) {
return m_image_ctx.resize_reqs.front()->get_image_size();
true,
this->create_callback_context());
- ceph_assert(m_image_ctx.owner_lock.is_locked());
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
+ std::unique_lock image_locker{m_image_ctx.image_lock};
req->send();
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " should_complete: " << " r=" << r << dendl;
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ std::shared_lock owner_lock{m_image_ctx.owner_lock};
switch (m_state) {
case STATE_RESIZE_OBJECT_MAP:
ldout(cct, 5) << "RESIZE_OBJECT_MAP" << dendl;
template <typename I>
void RebuildObjectMapRequest<I>::send_resize_object_map() {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
- m_image_ctx.image_lock.get_read();
+ m_image_ctx.image_lock.lock_shared();
ceph_assert(m_image_ctx.object_map != nullptr);
uint64_t size = get_image_size();
uint64_t num_objects = Striper::get_num_objects(m_image_ctx.layout, size);
if (m_image_ctx.object_map->size() == num_objects) {
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
send_verify_objects();
return;
}
m_image_ctx.object_map->aio_resize(size, OBJECT_NONEXISTENT,
this->create_callback_context());
- m_image_ctx.image_lock.put_read();
+ m_image_ctx.image_lock.unlock_shared();
}
template <typename I>
void RebuildObjectMapRequest<I>::send_trim_image() {
CephContext *cct = m_image_ctx.cct;
- RWLock::RLocker l(m_image_ctx.owner_lock);
+ std::shared_lock l{m_image_ctx.owner_lock};
// should have been canceled prior to releasing lock
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
uint64_t new_size;
uint64_t orig_size;
{
- RWLock::RLocker l(m_image_ctx.image_lock);
+ std::shared_lock l{m_image_ctx.image_lock};
ceph_assert(m_image_ctx.object_map != nullptr);
new_size = get_image_size();
template <typename I>
void RebuildObjectMapRequest<I>::send_verify_objects() {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
m_state = STATE_VERIFY_OBJECTS;
template <typename I>
void RebuildObjectMapRequest<I>::send_save_object_map() {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " send_save_object_map" << dendl;
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
- RWLock::RLocker image_locker(m_image_ctx.image_lock);
+ std::shared_lock image_locker{m_image_ctx.image_lock};
ceph_assert(m_image_ctx.object_map != nullptr);
m_image_ctx.object_map->aio_save(this->create_callback_context());
}
template <typename I>
void RebuildObjectMapRequest<I>::send_update_header() {
- ceph_assert(m_image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
// should have been canceled prior to releasing lock
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
ceph_assert(r == 0);
comp->release();
- RWLock::WLocker image_locker(m_image_ctx.image_lock);
+ std::unique_lock image_locker{m_image_ctx.image_lock};
m_image_ctx.update_flags(m_image_ctx.snap_id, flags, false);
}
template <typename I>
uint64_t RebuildObjectMapRequest<I>::get_image_size() const {
- ceph_assert(m_image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
if (m_image_ctx.snap_id == CEPH_NOSNAP) {
if (!m_image_ctx.resize_reqs.empty()) {
return m_image_ctx.resize_reqs.front()->get_image_size();
return true;
}
- RWLock::RLocker owner_lock(image_ctx.owner_lock);
+ std::shared_lock owner_lock{image_ctx.owner_lock};
switch (m_state) {
case STATE_READ_SOURCE_HEADER:
send_write_destination_header();
CephContext *cct = image_ctx.cct;
if (m_state == STATE_READ_SOURCE_HEADER && r == -ENOENT) {
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
if (image_ctx.name == m_dest_name) {
// signal that replay raced with itself
return -EEXIST;
template <typename I>
void Request<I>::send() {
- I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ [[maybe_unused]] I &image_ctx = this->m_image_ctx;
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
// automatically create the event if we don't need to worry
// about affecting concurrent IO ops
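The assertion rewrites deserve a note: `ceph_mutex_is_locked()` (and the `_wlocked` variant used by the object_map hunks) only interrogates the mutex in lock-debugging builds and degenerates to `true` otherwise. That is why `Request<I>::send()` above tags `image_ctx` with `[[maybe_unused]]`: in non-debug builds the assertion no longer reads it. A sketch, assuming `common/ceph_mutex.h`; the function and field names are illustrative:

#include "common/ceph_mutex.h"
#include "include/ceph_assert.h"

template <typename I>
void example_send(I &request) {
  // referenced only inside the assertion below, which may compile to
  // ceph_assert(true) when lock debugging is disabled
  [[maybe_unused]] auto &image_ctx = request.image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
}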
bool Request<I>::append_op_event() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
+ std::shared_lock image_locker{image_ctx.image_lock};
if (image_ctx.journal != nullptr &&
image_ctx.journal->is_journal_appending()) {
append_op_event(util::create_context_callback<
template <typename I>
bool Request<I>::commit_op_event(int r) {
I &image_ctx = this->m_image_ctx;
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
if (!m_appended_op_event) {
return false;
template <typename I>
void Request<I>::replay_op_ready(Context *on_safe) {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
- ceph_assert(image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
+ ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
ceph_assert(m_op_tid != 0);
m_appended_op_event = true;
template <typename I>
void Request<I>::append_op_event(Context *on_safe) {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
- ceph_assert(image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
+ ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
ceph_assert(!can_affect_io());
// haven't started the request state machine yet
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
send_op();
}
}
ImageCtxT &image_ctx = this->m_image_ctx;
ceph_assert(can_affect_io());
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::scoped_lock locker{image_ctx.owner_lock, image_ctx.image_lock};
if (image_ctx.journal != nullptr) {
if (image_ctx.journal->is_journal_replaying()) {
Context *ctx = util::create_context_callback<T, MF>(request);
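One hunk above is not a one-to-one translation: the pair of `RWLock::RLocker`s guarding the journal check becomes a single `std::scoped_lock`, which acquires both mutexes together with deadlock avoidance but takes them exclusively (`lock()`, not `lock_shared()`), so it is a stronger grip than the two read locks it replaces. A self-contained sketch of the difference, using plain std types:

#include <mutex>
#include <shared_mutex>

void example(std::shared_mutex &owner_lock, std::shared_mutex &image_lock) {
  {
    // old shape: two independent shared (read) acquisitions
    std::shared_lock a{owner_lock};
    std::shared_lock b{image_lock};
  }
  // new shape: one exclusive acquisition of both, ordered to avoid deadlock
  std::scoped_lock locker{owner_lock, image_lock};
}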
I &image_ctx = this->m_image_ctx;
ResizeRequest *next_req = NULL;
{
- RWLock::WLocker image_locker(image_ctx.image_lock);
+ std::unique_lock image_locker{image_ctx.image_lock};
ceph_assert(m_xlist_item.remove_myself());
if (!image_ctx.resize_reqs.empty()) {
next_req = image_ctx.resize_reqs.front();
}
if (next_req != NULL) {
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
next_req->send();
}
}
template <typename I>
void ResizeRequest<I>::send() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
{
- RWLock::WLocker image_locker(image_ctx.image_lock);
+ std::unique_lock image_locker{image_ctx.image_lock};
if (!m_xlist_item.is_on_list()) {
image_ctx.resize_reqs.push_back(&m_xlist_item);
if (image_ctx.resize_reqs.front() != this) {
template <typename I>
void ResizeRequest<I>::send_op() {
- I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ [[maybe_unused]] I &image_ctx = this->m_image_ctx;
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
if (this->is_canceled()) {
this->async_complete(-ERESTART);
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << dendl;
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
TrimRequest<I> *req = TrimRequest<I>::create(
image_ctx, create_context_callback<
ResizeRequest<I>, &ResizeRequest<I>::handle_trim_image>(this),
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << dendl;
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
auto ctx = create_context_callback<
ResizeRequest<I>, &ResizeRequest<I>::handle_flush_cache>(this);
auto aio_comp = io::AioCompletion::create_and_start(
// need to invalidate since we're deleting objects, and
// ObjectCacher doesn't track non-existent objects
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
image_ctx.io_object_dispatcher->invalidate_cache(create_context_callback<
ResizeRequest<I>, &ResizeRequest<I>::handle_invalidate_cache>(this));
}
I &image_ctx = this->m_image_ctx;
{
- RWLock::WLocker image_locker(image_ctx.image_lock);
+ std::unique_lock image_locker{image_ctx.image_lock};
m_shrink_size_visible = true;
}
return nullptr;
}
- image_ctx.owner_lock.get_read();
- image_ctx.image_lock.get_read();
+ image_ctx.owner_lock.lock_shared();
+ image_ctx.image_lock.lock_shared();
if (image_ctx.object_map == nullptr) {
- image_ctx.image_lock.put_read();
- image_ctx.owner_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
+ image_ctx.owner_lock.unlock_shared();
// IO is still blocked
send_update_header();
image_ctx.object_map->aio_resize(
m_new_size, OBJECT_NONEXISTENT, create_context_callback<
ResizeRequest<I>, &ResizeRequest<I>::handle_grow_object_map>(this));
- image_ctx.image_lock.put_read();
- image_ctx.owner_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
+ image_ctx.owner_lock.unlock_shared();
return nullptr;
}
Context *ResizeRequest<I>::send_shrink_object_map() {
I &image_ctx = this->m_image_ctx;
- image_ctx.owner_lock.get_read();
- image_ctx.image_lock.get_read();
+ image_ctx.owner_lock.lock_shared();
+ image_ctx.image_lock.lock_shared();
if (image_ctx.object_map == nullptr || m_new_size > m_original_size) {
- image_ctx.image_lock.put_read();
- image_ctx.owner_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
+ image_ctx.owner_lock.unlock_shared();
update_size_and_overlap();
return this->create_context_finisher(0);
image_ctx.object_map->aio_resize(
m_new_size, OBJECT_NONEXISTENT, create_context_callback<
ResizeRequest<I>, &ResizeRequest<I>::handle_shrink_object_map>(this));
- image_ctx.image_lock.put_read();
- image_ctx.owner_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
+ image_ctx.owner_lock.unlock_shared();
return nullptr;
}
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << dendl;
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
image_ctx.io_work_queue->block_writes(create_context_callback<
ResizeRequest<I>, &ResizeRequest<I>::handle_post_block_writes>(this));
}
<< "new_size=" << m_new_size << dendl;;
// should have been canceled prior to releasing lock
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
template <typename I>
void ResizeRequest<I>::compute_parent_overlap() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
if (image_ctx.parent == NULL) {
m_new_parent_overlap = 0;
void ResizeRequest<I>::update_size_and_overlap() {
I &image_ctx = this->m_image_ctx;
{
- RWLock::WLocker image_locker(image_ctx.image_lock);
+ std::unique_lock image_locker{image_ctx.image_lock};
image_ctx.size = m_new_size;
if (image_ctx.parent != NULL && m_new_size < m_original_size) {
template <typename I>
void SnapshotCreateRequest<I>::send_suspend_aio() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
+ std::shared_lock image_locker{image_ctx.image_lock};
// should have been canceled prior to releasing lock
ceph_assert(image_ctx.exclusive_lock == nullptr ||
Context *SnapshotCreateRequest<I>::send_create_object_map() {
I &image_ctx = this->m_image_ctx;
- image_ctx.image_lock.get_read();
+ image_ctx.image_lock.lock_shared();
if (image_ctx.object_map == nullptr || m_skip_object_map) {
- image_ctx.image_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
update_snap_context();
image_ctx.io_work_queue->unblock_writes();
m_snap_id, create_context_callback<
SnapshotCreateRequest<I>,
&SnapshotCreateRequest<I>::handle_create_object_map>(this));
- image_ctx.image_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
return nullptr;
}
void SnapshotCreateRequest<I>::update_snap_context() {
I &image_ctx = this->m_image_ctx;
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
- RWLock::WLocker image_locker(image_ctx.image_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
+ std::unique_lock image_locker{image_ctx.image_lock};
if (image_ctx.old_format) {
return;
}
template <typename I>
void SnapshotLimitRequest<I>::send_limit_snaps() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
{
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
librados::ObjectWriteOperation op;
cls_client::snapshot_set_limit(&op, m_snap_limit);
template <typename I>
void SnapshotProtectRequest<I>::send_protect_snap() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
template <typename I>
int SnapshotProtectRequest<I>::verify_and_send_protect_snap() {
I &image_ctx = this->m_image_ctx;
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
CephContext *cct = image_ctx.cct;
if ((image_ctx.features & RBD_FEATURE_LAYERING) == 0) {
#include "librbd/operation/SnapshotRemoveRequest.h"
#include "common/dout.h"
#include "common/errno.h"
+#include "include/ceph_assert.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
I &image_ctx = this->m_image_ctx;
CephContext *cct = image_ctx.cct;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
{
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
if (image_ctx.snap_info.find(m_snap_id) == image_ctx.snap_info.end()) {
lderr(cct) << "snapshot doesn't exist" << dendl;
this->async_complete(-ENOENT);
bool detach_child = false;
{
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
cls::rbd::ParentImageSpec our_pspec;
int r = image_ctx.get_parent_spec(m_snap_id, &our_pspec);
CephContext *cct = image_ctx.cct;
{
- RWLock::RLocker owner_lock(image_ctx.owner_lock);
- RWLock::WLocker image_locker(image_ctx.image_lock);
+ std::shared_lock owner_lock{image_ctx.owner_lock};
+ std::unique_lock image_locker{image_ctx.image_lock};
if (image_ctx.object_map != nullptr) {
ldout(cct, 5) << dendl;
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << dendl;
- RWLock::WLocker image_locker(image_ctx.image_lock);
+ std::unique_lock image_locker{image_ctx.image_lock};
image_ctx.rm_snap(m_snap_namespace, m_snap_name, m_snap_id);
}
int SnapshotRemoveRequest<I>::scan_for_parents(
cls::rbd::ParentImageSpec &pspec) {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
if (pspec.pool_id != -1) {
map<uint64_t, SnapInfo>::iterator it;
template <typename I>
journal::Event SnapshotRenameRequest<I>::create_event(uint64_t op_tid) const {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.image_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
std::string src_snap_name;
auto snap_info_it = image_ctx.snap_info.find(m_snap_id);
template <typename I>
void SnapshotRenameRequest<I>::send_rename_snap() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
+ std::shared_lock image_locker{image_ctx.image_lock};
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
<< m_object_num << dendl;
{
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
if (m_object_num < m_head_num_objects &&
m_snap_object_map != nullptr &&
!image_ctx.object_map->object_may_exist(m_object_num) &&
uint64_t current_size;
{
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
+ std::shared_lock image_locker{image_ctx.image_lock};
current_size = image_ctx.get_image_size(CEPH_NOSNAP);
}
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
Context *ctx = create_context_callback<
SnapshotRollbackRequest<I>,
&SnapshotRollbackRequest<I>::handle_resize_image>(this);
bool object_map_enabled;
CephContext *cct = image_ctx.cct;
{
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
+ std::shared_lock image_locker{image_ctx.image_lock};
object_map_enabled = (image_ctx.object_map != nullptr);
int r = image_ctx.get_flags(m_snap_id, &flags);
if (r < 0) {
I &image_ctx = this->m_image_ctx;
{
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
+ std::shared_lock image_locker{image_ctx.image_lock};
if (image_ctx.object_map != nullptr) {
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
uint64_t num_objects;
{
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
num_objects = Striper::get_num_objects(image_ctx.layout,
image_ctx.get_current_size());
}
bool object_map_enabled;
{
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
+ std::shared_lock image_locker{image_ctx.image_lock};
object_map_enabled = (image_ctx.object_map != nullptr);
}
if (!object_map_enabled) {
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
- RWLock::RLocker owner_lock(image_ctx.owner_lock);
+ std::shared_lock owner_lock{image_ctx.owner_lock};
Context *ctx = create_context_callback<
SnapshotRollbackRequest<I>,
&SnapshotRollbackRequest<I>::handle_invalidate_cache>(this);
void SnapshotRollbackRequest<I>::apply() {
I &image_ctx = this->m_image_ctx;
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
- RWLock::WLocker image_locker(image_ctx.image_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
+ std::unique_lock image_locker{image_ctx.image_lock};
if (image_ctx.object_map != nullptr) {
std::swap(m_object_map, image_ctx.object_map);
}
int send() override {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 10) << this << " scanning pool '" << m_pool.second << "'"
return should_complete_error();
}
- RWLock::RLocker owner_lock(image_ctx.owner_lock);
+ std::shared_lock owner_lock{image_ctx.owner_lock};
bool finished = false;
switch (m_state) {
case STATE_UNPROTECT_SNAP_START:
template <typename I>
bool SnapshotUnprotectRequest<I>::should_complete_error() {
I &image_ctx = this->m_image_ctx;
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
CephContext *cct = image_ctx.cct;
lderr(cct) << this << " " << __func__ << ": "
<< "ret_val=" << m_ret_val << dendl;
template <typename I>
void SnapshotUnprotectRequest<I>::send_unprotect_snap_start() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
template <typename I>
void SnapshotUnprotectRequest<I>::send_scan_pool_children() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
template <typename I>
void SnapshotUnprotectRequest<I>::send_unprotect_snap_finish() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
template <typename I>
void SnapshotUnprotectRequest<I>::send_unprotect_snap_rollback() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
template <typename I>
int SnapshotUnprotectRequest<I>::verify_and_send_unprotect_snap_start() {
I &image_ctx = this->m_image_ctx;
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
CephContext *cct = image_ctx.cct;
if ((image_ctx.features & RBD_FEATURE_LAYERING) == 0) {
int send() override {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
ldout(m_cct, 20) << dendl;
}
{
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
if (image_ctx.object_map != nullptr &&
!image_ctx.object_map->object_may_exist(m_object_no)) {
// can skip because the object does not exist
ldout(m_cct, 20) << dendl;
- image_ctx.owner_lock.get_read();
- image_ctx.image_lock.get_read();
+ image_ctx.owner_lock.lock_shared();
+ image_ctx.image_lock.lock_shared();
if (image_ctx.object_map == nullptr) {
// possible that exclusive lock was lost in background
lderr(m_cct) << "object map is not initialized" << dendl;
- image_ctx.image_lock.put_read();
- image_ctx.owner_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
+ image_ctx.owner_lock.unlock_shared();
finish_op(-EINVAL);
return;
}
m_finish_op_ctx = image_ctx.exclusive_lock->start_op(&r);
if (m_finish_op_ctx == nullptr) {
lderr(m_cct) << "lost exclusive lock" << dendl;
- image_ctx.image_lock.put_read();
- image_ctx.owner_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
+ image_ctx.owner_lock.unlock_shared();
finish_op(r);
return;
}
OBJECT_EXISTS, {}, false, ctx);
// NOTE: state machine might complete before we reach here
- image_ctx.image_lock.put_read();
- image_ctx.owner_lock.put_read();
+ image_ctx.image_lock.unlock_shared();
+ image_ctx.owner_lock.unlock_shared();
if (!sent) {
finish_op(0);
}
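// get_read()/put_read() map onto the std::shared_mutex primitives
// lock_shared()/unlock_shared(). Scoped guards do not fit this path: the
// locks must be dropped explicitly before each finish_op() call, and, as
// the NOTE above warns, the async state machine may complete before the
// unlock calls are reached.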
&C_SparsifyObject<I>::handle_post_update_object_map>(this);
bool sent;
{
- RWLock::RLocker owner_locker(image_ctx.owner_lock);
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock owner_locker{image_ctx.owner_lock};
+ std::shared_lock image_locker{image_ctx.image_lock};
- assert(image_ctx.exclusive_lock->is_lock_owner());
- assert(image_ctx.object_map != nullptr);
+ ceph_assert(image_ctx.exclusive_lock->is_lock_owner());
+ ceph_assert(image_ctx.object_map != nullptr);
template <typename I>
void SparsifyRequest<I>::sparsify_objects() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 5) << dendl;
- assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
uint64_t objects = 0;
{
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
objects = image_ctx.get_object_count(CEPH_NOSNAP);
}
int send() override {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
int send() override {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
{
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
if (image_ctx.object_map != nullptr &&
!image_ctx.object_map->object_may_exist(m_object_no)) {
return 1;
return true;
}
- RWLock::RLocker owner_lock(image_ctx.owner_lock);
+ std::shared_lock owner_lock{image_ctx.owner_lock};
switch (m_state) {
case STATE_PRE_TRIM:
ldout(cct, 5) << " PRE_TRIM" << dendl;
template<typename I>
void TrimRequest<I>::send_pre_trim() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
if (m_delete_start >= m_num_objects) {
send_clean_boundary();
}
{
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
if (image_ctx.object_map != nullptr) {
ldout(image_ctx.cct, 5) << this << " send_pre_trim: "
<< " delete_start_min=" << m_delete_start_min
template<typename I>
void TrimRequest<I>::send_copyup_objects() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
::SnapContext snapc;
bool has_snapshots;
uint64_t parent_overlap;
{
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
snapc = image_ctx.snapc;
has_snapshots = !image_ctx.snaps.empty();
template <typename I>
void TrimRequest<I>::send_remove_objects() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
ldout(image_ctx.cct, 5) << this << " send_remove_objects: "
<< " delete_start=" << m_delete_start
template<typename I>
void TrimRequest<I>::send_post_trim() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
{
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
if (image_ctx.object_map != nullptr) {
ldout(image_ctx.cct, 5) << this << " send_post_trim:"
<< " delete_start_min=" << m_delete_start_min
template <typename I>
void TrimRequest<I>::send_clean_boundary() {
I &image_ctx = this->m_image_ctx;
- ceph_assert(image_ctx.owner_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
if (m_delete_off <= m_new_size) {
send_finish(0);
::SnapContext snapc;
{
- RWLock::RLocker image_locker(image_ctx.image_lock);
+ std::shared_lock image_locker{image_ctx.image_lock};
snapc = image_ctx.snapc;
}
Notifier::Notifier(ContextWQ *work_queue, IoCtx &ioctx, const std::string &oid)
: m_work_queue(work_queue), m_ioctx(ioctx), m_oid(oid),
- m_aio_notify_lock(util::unique_lock_name(
- "librbd::object_watcher::Notifier::m_aio_notify_lock", this)) {
+ m_aio_notify_lock(ceph::make_mutex(util::unique_lock_name(
+ "librbd::object_watcher::Notifier::m_aio_notify_lock", this))) {
m_cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
}
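// Named construction moves to the ceph::make_mutex() factory: the name
// feeds lockdep in debug builds, while release builds alias ceph::mutex to
// std::mutex and discard it.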
Notifier::~Notifier() {
- Mutex::Locker aio_notify_locker(m_aio_notify_lock);
+ std::lock_guard aio_notify_locker{m_aio_notify_lock};
ceph_assert(m_pending_aio_notifies == 0);
}
void Notifier::flush(Context *on_finish) {
- Mutex::Locker aio_notify_locker(m_aio_notify_lock);
+ std::lock_guard aio_notify_locker{m_aio_notify_lock};
if (m_pending_aio_notifies == 0) {
m_work_queue->queue(on_finish, 0);
return;
void Notifier::notify(bufferlist &bl, NotifyResponse *response,
Context *on_finish) {
{
- Mutex::Locker aio_notify_locker(m_aio_notify_lock);
+ std::lock_guard aio_notify_locker{m_aio_notify_lock};
++m_pending_aio_notifies;
ldout(m_cct, 20) << "pending=" << m_pending_aio_notifies << dendl;
void Notifier::handle_notify(int r, Context *on_finish) {
ldout(m_cct, 20) << "r=" << r << dendl;
- Mutex::Locker aio_notify_locker(m_aio_notify_lock);
+ std::lock_guard aio_notify_locker{m_aio_notify_lock};
ceph_assert(m_pending_aio_notifies > 0);
--m_pending_aio_notifies;
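// Mutex::Locker converts mechanically to std::lock_guard throughout this
// file; the braced form deduces the mutex type via C++17 class template
// argument deduction.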
#include "include/buffer_fwd.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "common/WorkQueue.h"
#include <list>
CephContext *m_cct;
std::string m_oid;
- Mutex m_aio_notify_lock;
+ ceph::mutex m_aio_notify_lock;
size_t m_pending_aio_notifies = 0;
Contexts m_aio_notify_flush_ctxs;
// vim: ts=8 sw=2 smarttab
#include "librbd/watcher/RewatchRequest.h"
-#include "common/RWLock.h"
+#include "common/ceph_mutex.h"
#include "common/errno.h"
#include "librbd/Utils.h"
using std::string;
RewatchRequest::RewatchRequest(librados::IoCtx& ioctx, const string& oid,
- RWLock &watch_lock,
+ ceph::shared_mutex &watch_lock,
librados::WatchCtx2 *watch_ctx,
uint64_t *watch_handle, Context *on_finish)
: m_ioctx(ioctx), m_oid(oid), m_watch_lock(watch_lock),
}
void RewatchRequest::unwatch() {
- ceph_assert(m_watch_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(m_watch_lock));
if (*m_watch_handle == 0) {
rewatch();
return;
}
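// For a shared mutex the write-side check is ceph_mutex_is_wlocked(), the
// exclusive-ownership counterpart of ceph_mutex_is_locked(); it likewise
// evaluates to `true` in release builds.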
{
- RWLock::WLocker watch_locker(m_watch_lock);
+ std::unique_lock watch_locker{m_watch_lock};
*m_watch_handle = m_rewatch_handle;
}
#ifndef CEPH_LIBRBD_WATCHER_REWATCH_REQUEST_H
#define CEPH_LIBRBD_WATCHER_REWATCH_REQUEST_H
+#include "common/ceph_mutex.h"
#include "include/int_types.h"
#include "include/rados/librados.hpp"
struct Context;
-struct RWLock;
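// The `struct RWLock;` forward declaration has no equivalent after the
// conversion: ceph::shared_mutex is a type alias (std::shared_mutex in
// release builds), and an alias cannot be forward-declared, hence the
// ceph_mutex.h include above.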
namespace librbd {
public:
static RewatchRequest *create(librados::IoCtx& ioctx, const std::string& oid,
- RWLock &watch_lock,
+ ceph::shared_mutex &watch_lock,
librados::WatchCtx2 *watch_ctx,
uint64_t *watch_handle, Context *on_finish) {
return new RewatchRequest(ioctx, oid, watch_lock, watch_ctx, watch_handle,
}
RewatchRequest(librados::IoCtx& ioctx, const std::string& oid,
- RWLock &watch_lock, librados::WatchCtx2 *watch_ctx,
+ ceph::shared_mutex &watch_lock, librados::WatchCtx2 *watch_ctx,
uint64_t *watch_handle, Context *on_finish);
void send();
librados::IoCtx& m_ioctx;
std::string m_oid;
- RWLock &m_watch_lock;
+ ceph::shared_mutex &m_watch_lock;
librados::WatchCtx2 *m_watch_ctx;
uint64_t *m_watch_handle;
Context *m_on_finish;
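// Note the API ripple: the constructor and factory now take a
// ceph::shared_mutex reference, so any Watcher that owns the watch_lock
// must switch its member type in the same change to keep this compiling.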