TEST_F(TestMockImageDeleterSnapshotPurgeRequest, Success) {
{
- RWLock::WLocker image_locker(m_local_image_ctx->image_lock);
+ std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {});
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap2", 2,
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, OpenError) {
{
- RWLock::WLocker image_locker(m_local_image_ctx->image_lock);
+ std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0,
{});
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, AcquireLockError) {
{
- RWLock::WLocker image_locker(m_local_image_ctx->image_lock);
+ std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0,
{});
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, SnapUnprotectBusy) {
{
- RWLock::WLocker image_locker(m_local_image_ctx->image_lock);
+ std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {});
}
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, SnapUnprotectError) {
{
- RWLock::WLocker image_locker(m_local_image_ctx->image_lock);
+ std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {});
}
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, SnapRemoveError) {
{
- RWLock::WLocker image_locker(m_local_image_ctx->image_lock);
+ std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0,
{});
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, CloseError) {
{
- RWLock::WLocker image_locker(m_local_image_ctx->image_lock);
+ std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0,
{});
template <>
struct Threads<librbd::MockTestImageCtx> {
MockSafeTimer *timer;
- Mutex &timer_lock;
+ ceph::mutex &timer_lock;
MockContextWQ *work_queue;
typedef librbd::TrashWatcher<librbd::MockTestImageCtx> LibrbdTrashWatcher;
struct MockListener : TrashListener {
- MOCK_METHOD2(handle_trash_image, void(const std::string&, const utime_t&));
+ MOCK_METHOD2(handle_trash_image, void(const std::string&,
+ const ceph::real_clock::time_point&));
};
void expect_work_queue(MockThreads &mock_threads) {
.WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) {
auto wrapped_ctx =
new FunctionContext([this, ctx](int r) {
- Mutex::Locker timer_locker(m_threads->timer_lock);
+ std::lock_guard timer_locker{m_threads->timer_lock};
ctx->complete(r);
});
m_threads->work_queue->queue(wrapped_ctx, 0);
template <>
struct Threads<librbd::MockTestImageCtx> {
- Mutex &timer_lock;
+ ceph::mutex &timer_lock;
SafeTimer *timer;
ContextWQ *work_queue;
static ImageSync* create(
librbd::MockTestImageCtx *local_image_ctx,
librbd::MockTestImageCtx *remote_image_ctx,
- SafeTimer *timer, Mutex *timer_lock,
+ SafeTimer *timer, ceph::mutex *timer_lock,
const std::string &mirror_uuid, ::journal::MockJournaler *journaler,
librbd::journal::MirrorPeerClientMeta *client_meta, ContextWQ *work_queue,
InstanceWatcher<librbd::MockTestImageCtx> *instance_watcher,
template <>
struct Threads<librbd::MockTestImageCtx> {
- Mutex &timer_lock;
+ ceph::mutex &timer_lock;
SafeTimer *timer;
ContextWQ *work_queue;
template <>
struct Threads<librbd::MockTestImageCtx> {
- Mutex &timer_lock;
+ ceph::mutex &timer_lock;
SafeTimer *timer;
ContextWQ *work_queue;
struct rbd_bencher {
librbd::Image *image;
- Mutex lock;
- Cond cond;
+ ceph::mutex lock = ceph::make_mutex("rbd_bencher::lock");
+ ceph::condition_variable cond;
int in_flight;
explicit rbd_bencher(librbd::Image *i)
: image(i),
- lock("rbd_bencher::lock"),
in_flight(0) {
}
bool start_write(int max, uint64_t off, uint64_t len, bufferlist& bl,
int op_flags) {
{
- Mutex::Locker l(lock);
+ std::lock_guard l{lock};
if (in_flight >= max)
return false;
in_flight++;
}
void wait_for(int max) {
- Mutex::Locker l(lock);
+ std::unique_lock l{lock};
while (in_flight > max) {
- utime_t dur;
- dur.set_from_double(.2);
- cond.WaitInterval(lock, dur);
+ cond.wait_for(l, 200ms);
}
}
cout << "write error: " << cpp_strerror(ret) << std::endl;
exit(ret < 0 ? -ret : ret);
}
- b->lock.Lock();
+ b->lock.lock();
b->in_flight--;
- b->cond.Signal();
- b->lock.Unlock();
+ b->cond.notify_all();
+ b->lock.unlock();
c->release();
}
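For reference, the rbd_bencher hunks above show the canonical shape of this
conversion: Mutex::Locker becomes std::lock_guard (or std::unique_lock where
the lock has to be handed to a wait), Cond::Signal() becomes notify_all(),
and Cond::WaitInterval() becomes a chrono-based wait_for(). A minimal,
self-contained sketch of the same in-flight accounting over the plain std
primitives, which ceph::mutex and ceph::condition_variable alias in release
builds:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    using namespace std::chrono_literals;

    // In-flight op accounting in the style of rbd_bencher.
    struct InFlightTracker {
      std::mutex lock;
      std::condition_variable cond;
      int in_flight = 0;

      bool start_op(int max) {
        std::lock_guard l{lock};      // RAII guard, replaces Mutex::Locker
        if (in_flight >= max)
          return false;
        ++in_flight;
        return true;
      }

      void finish_op() {
        {
          std::lock_guard l{lock};
          --in_flight;
        }
        cond.notify_all();            // replaces Cond::Signal()
      }

      void wait_for(int max) {
        std::unique_lock l{lock};     // wait_for() requires unique_lock
        while (in_flight > max)
          cond.wait_for(l, 200ms);    // replaces Cond::WaitInterval()
      }
    };

    int main() {
      InFlightTracker t;
      t.start_op(4);
      std::thread worker([&] { t.finish_op(); });
      t.wait_for(0);                  // blocks until the worker finishes
      worker.join();
    }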
#include "include/rados/librados.hpp"
#include "common/Cond.h"
#include "common/errno.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "librbd/internal.h"
#include "librbd/api/Mirror.h"
#include "tools/rbd_mirror/ClusterWatcher.h"
class TestClusterWatcher : public ::rbd::mirror::TestFixture {
public:
- TestClusterWatcher() : m_lock("TestClusterWatcherLock")
- {
+ TestClusterWatcher() {
m_cluster = std::make_shared<librados::Rados>();
EXPECT_EQ("", connect_cluster_pp(*m_cluster));
}
void check_peers() {
m_cluster_watcher->refresh_pools();
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
ASSERT_EQ(m_pool_peers, m_cluster_watcher->get_pool_peers());
}
RadosRef m_cluster;
- Mutex m_lock;
+ ceph::mutex m_lock = ceph::make_mutex("TestClusterWatcherLock");
unique_ptr<rbd::mirror::ServiceDaemon<>> m_service_daemon;
unique_ptr<ClusterWatcher> m_cluster_watcher;
false);
EXPECT_EQ(0, ictx->state->open(0));
{
- RWLock::WLocker image_locker(ictx->image_lock);
+ std::unique_lock image_locker{ictx->image_lock};
ictx->set_journal_policy(new librbd::journal::DisabledPolicy());
}
false);
EXPECT_EQ(0, ictx->state->open(0));
{
- RWLock::WLocker image_locker(ictx->image_lock);
+ std::unique_lock image_locker{ictx->image_lock};
ictx->set_journal_policy(new librbd::journal::DisabledPolicy());
}
struct C_WatchCtx : public librados::WatchCtx2 {
TestImageReplayer *test;
std::string oid;
- Mutex lock;
- Cond cond;
+ ceph::mutex lock = ceph::make_mutex("C_WatchCtx::lock");
+ ceph::condition_variable cond;
bool notified;
C_WatchCtx(TestImageReplayer *test, const std::string &oid)
- : test(test), oid(oid), lock("C_WatchCtx::lock"), notified(false) {
+ : test(test), oid(oid), notified(false) {
}
void handle_notify(uint64_t notify_id, uint64_t cookie,
bufferlist bl;
test->m_remote_ioctx.notify_ack(oid, notify_id, cookie, bl);
- Mutex::Locker locker(lock);
+ std::lock_guard locker{lock};
notified = true;
- cond.Signal();
+ cond.notify_all();
}
void handle_error(uint64_t cookie, int err) override {
return false;
}
- Mutex::Locker locker(m_watch_ctx->lock);
+ std::unique_lock locker{m_watch_ctx->lock};
while (!m_watch_ctx->notified) {
- if (m_watch_ctx->cond.WaitInterval(m_watch_ctx->lock,
- utime_t(seconds, 0)) != 0) {
+ if (m_watch_ctx->cond.wait_for(locker,
+ std::chrono::seconds(seconds)) ==
+ std::cv_status::timeout) {
return false;
}
}
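One behavioral detail worth calling out in the hunk above: Cond::WaitInterval()
reported a timeout through a non-zero return code, while
std::condition_variable::wait_for() returns std::cv_status, so every converted
call site compares against std::cv_status::timeout instead. A condensed sketch
of the converted wait loop (plain std types standing in for the ceph:: aliases):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Returns false if 'flag' did not become true within 'timeout'.
    bool wait_flag(std::mutex& lock, std::condition_variable& cond,
                   const bool& flag, std::chrono::seconds timeout) {
      std::unique_lock locker{lock};
      while (!flag) {
        if (cond.wait_for(locker, timeout) == std::cv_status::timeout) {
          return false;               // WaitInterval() returned ETIMEDOUT here
        }
      }
      return true;
      // Equivalent, letting the predicate overload drive the loop:
      //   return cond.wait_for(locker, timeout, [&] { return flag; });
    }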
// race failed op shut down with new ops
open_remote_image(&ictx);
for (uint64_t i = 0; i < 10; ++i) {
- RWLock::RLocker owner_locker(ictx->owner_lock);
+ std::shared_lock owner_locker{ictx->owner_lock};
C_SaferCond request_lock;
ictx->exclusive_lock->acquire_lock(&request_lock);
ASSERT_EQ(0, request_lock.wait());
// race failed op shut down with new tag flush
open_remote_image(&ictx);
{
- RWLock::RLocker owner_locker(ictx->owner_lock);
+ std::shared_lock owner_locker{ictx->owner_lock};
C_SaferCond request_lock;
ictx->exclusive_lock->acquire_lock(&request_lock);
ASSERT_EQ(0, request_lock.wait());
#include "test/rbd_mirror/test_fixture.h"
#include "include/stringify.h"
#include "include/rbd/librbd.hpp"
+#include "common/Cond.h"
#include "journal/Journaler.h"
#include "journal/Settings.h"
#include "librbd/ExclusiveLock.h"
}
}
- RWLock::RLocker owner_locker(image_ctx->owner_lock);
+ std::shared_lock owner_locker{image_ctx->owner_lock};
ASSERT_EQ(0, flush(image_ctx));
}
C_SaferCond ctx;
{
- RWLock::RLocker owner_locker((*image_ctx)->owner_lock);
+ std::shared_lock owner_locker{(*image_ctx)->owner_lock};
(*image_ctx)->exclusive_lock->try_acquire_lock(&ctx);
}
ASSERT_EQ(0, ctx.wait());
std::move(bl),
0));
{
- RWLock::RLocker owner_locker(m_remote_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_remote_image_ctx->owner_lock};
ASSERT_EQ(0, flush(m_remote_image_ctx));
}
std::move(bl),
0));
{
- RWLock::RLocker owner_locker(m_remote_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_remote_image_ctx->owner_lock};
ASSERT_EQ(0, flush(m_remote_image_ctx));
}
m_remote_image_ctx->io_work_queue->discard(
off + 1, len - 2, m_remote_image_ctx->discard_granularity_bytes));
{
- RWLock::RLocker owner_locker(m_remote_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_remote_image_ctx->owner_lock};
ASSERT_EQ(0, flush(m_remote_image_ctx));
}
for (auto &snap_name : snap_names) {
uint64_t remote_snap_id;
{
- RWLock::RLocker remote_image_locker(m_remote_image_ctx->image_lock);
+ std::shared_lock remote_image_locker{m_remote_image_ctx->image_lock};
remote_snap_id = m_remote_image_ctx->get_snap_id(
cls::rbd::UserSnapshotNamespace{}, snap_name);
}
m_remote_image_ctx->state->snap_set(remote_snap_id, &ctx);
ASSERT_EQ(0, ctx.wait());
- RWLock::RLocker remote_image_locker(m_remote_image_ctx->image_lock);
+ std::shared_lock remote_image_locker{m_remote_image_ctx->image_lock};
remote_size = m_remote_image_ctx->get_image_size(
m_remote_image_ctx->snap_id);
}
uint64_t local_snap_id;
{
- RWLock::RLocker image_locker(m_local_image_ctx->image_lock);
+ std::shared_lock image_locker{m_local_image_ctx->image_lock};
local_snap_id = m_local_image_ctx->get_snap_id(
cls::rbd::UserSnapshotNamespace{}, snap_name);
}
m_local_image_ctx->state->snap_set(local_snap_id, &ctx);
ASSERT_EQ(0, ctx.wait());
- RWLock::RLocker image_locker(m_local_image_ctx->image_lock);
+ std::shared_lock image_locker{m_local_image_ctx->image_lock};
local_size = m_local_image_ctx->get_image_size(
m_local_image_ctx->snap_id);
bool flags_set;
class Listener : public rbd::mirror::leader_watcher::Listener {
public:
Listener()
- : m_test_lock(unique_lock_name("LeaderWatcher::m_test_lock", this)) {
+ : m_test_lock(ceph::make_mutex(
+ unique_lock_name("LeaderWatcher::m_test_lock", this))) {
}
void on_acquire(int r, Context *ctx) {
- Mutex::Locker locker(m_test_lock);
+ std::lock_guard locker{m_test_lock};
m_on_acquire_r = r;
m_on_acquire = ctx;
}
void on_release(int r, Context *ctx) {
- Mutex::Locker locker(m_test_lock);
+ std::lock_guard locker{m_test_lock};
m_on_release_r = r;
m_on_release = ctx;
}
int acquire_count() const {
- Mutex::Locker locker(m_test_lock);
+ std::lock_guard locker{m_test_lock};
return m_acquire_count;
}
int release_count() const {
- Mutex::Locker locker(m_test_lock);
+ std::lock_guard locker{m_test_lock};
return m_release_count;
}
void post_acquire_handler(Context *on_finish) override {
- Mutex::Locker locker(m_test_lock);
+ std::lock_guard locker{m_test_lock};
m_acquire_count++;
on_finish->complete(m_on_acquire_r);
m_on_acquire_r = 0;
}
void pre_release_handler(Context *on_finish) override {
- Mutex::Locker locker(m_test_lock);
+ std::lock_guard locker{m_test_lock};
m_release_count++;
on_finish->complete(m_on_release_r);
m_on_release_r = 0;
}
private:
- mutable Mutex m_test_lock;
+ mutable ceph::mutex m_test_lock;
int m_acquire_count = 0;
int m_release_count = 0;
int m_on_acquire_r = 0;
#include "librbd/api/Mirror.h"
#include "common/Cond.h"
#include "common/errno.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "tools/rbd_mirror/PoolWatcher.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/Types.h"
public:
TestPoolWatcher()
- : m_lock("TestPoolWatcherLock"), m_pool_watcher_listener(this),
+ : m_pool_watcher_listener(this),
m_image_number(0), m_snap_number(0)
{
m_cluster = std::make_shared<librados::Rados>();
struct PoolWatcherListener : public rbd::mirror::pool_watcher::Listener {
TestPoolWatcher *test;
- Cond cond;
+ ceph::condition_variable cond;
ImageIds image_ids;
explicit PoolWatcherListener(TestPoolWatcher *test) : test(test) {
void handle_update(const std::string &mirror_uuid,
ImageIds &&added_image_ids,
ImageIds &&removed_image_ids) override {
- Mutex::Locker locker(test->m_lock);
+ std::lock_guard locker{test->m_lock};
for (auto &image_id : removed_image_ids) {
image_ids.erase(image_id);
}
image_ids.insert(added_image_ids.begin(), added_image_ids.end());
- cond.Signal();
+ cond.notify_all();
}
};
}
void check_images() {
- Mutex::Locker l(m_lock);
+ std::unique_lock l{m_lock};
while (m_mirrored_images != m_pool_watcher_listener.image_ids) {
- if (m_pool_watcher_listener.cond.WaitInterval(
- m_lock, utime_t(10, 0)) != 0) {
+      if (m_pool_watcher_listener.cond.wait_for(l, 10s) ==
+          std::cv_status::timeout) {
break;
}
}
ASSERT_EQ(m_mirrored_images, m_pool_watcher_listener.image_ids);
}
- Mutex m_lock;
+ ceph::mutex m_lock = ceph::make_mutex("TestPoolWatcherLock");
RadosRef m_cluster;
PoolWatcherListener m_pool_watcher_listener;
unique_ptr<PoolWatcher<> > m_pool_watcher;
template <>
struct Threads<librbd::MockTestImageCtx> {
MockSafeTimer *timer;
- Mutex &timer_lock;
+ ceph::mutex &timer_lock;
MockContextWQ *work_queue;
}
};
- TestMockImageMap()
- : m_lock("TestMockImageMap::m_lock"),
- m_notify_update_count(0),
- m_map_update_count(0) {
- }
+ TestMockImageMap() = default;
void SetUp() override {
TestFixture::SetUp();
EXPECT_CALL(*mock_threads.timer, add_event_after(_,_))
.WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) {
auto wrapped_ctx = new FunctionContext([this, ctx](int r) {
- Mutex::Locker timer_locker(m_threads->timer_lock);
+ std::lock_guard timer_locker{m_threads->timer_lock};
ctx->complete(r);
});
m_threads->work_queue->queue(wrapped_ctx, 0);
cct->_conf.set_val("rbd_mirror_image_policy_rebalance_timeout", "0");
auto wrapped_ctx = new FunctionContext([this, ctx](int r) {
- Mutex::Locker timer_locker(m_threads->timer_lock);
+ std::lock_guard timer_locker{m_threads->timer_lock};
ctx->complete(r);
});
m_threads->work_queue->queue(wrapped_ctx, 0);
.WillOnce(Invoke([this, &request, r]() {
request.on_finish->complete(r);
if (r == 0) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
++m_map_update_count;
- m_cond.Signal();
+ m_cond.notify_all();
}
}));
}
std::map<std::string, Context*> *peer_ack_ctxs) {
EXPECT_CALL(mock_listener, mock_acquire_image(global_image_id, _))
.WillOnce(WithArg<1>(Invoke([this, global_image_id, peer_ack_ctxs](Context* ctx) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
peer_ack_ctxs->insert({global_image_id, ctx});
++m_notify_update_count;
- m_cond.Signal();
+ m_cond.notify_all();
})));
}
std::map<std::string, Context*> *peer_ack_ctxs) {
EXPECT_CALL(mock_listener, mock_release_image(global_image_id, _))
.WillOnce(WithArg<1>(Invoke([this, global_image_id, peer_ack_ctxs](Context* ctx) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
peer_ack_ctxs->insert({global_image_id, ctx});
++m_notify_update_count;
- m_cond.Signal();
+ m_cond.notify_all();
})));
}
EXPECT_CALL(mock_listener,
mock_remove_image(mirror_uuid, global_image_id, _))
.WillOnce(WithArg<2>(Invoke([this, global_image_id, peer_ack_ctxs](Context* ctx) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
peer_ack_ctxs->insert({global_image_id, ctx});
++m_notify_update_count;
- m_cond.Signal();
+ m_cond.notify_all();
})));
}
EXPECT_CALL(mock_listener, mock_release_image(_, _))
.Times(count)
.WillRepeatedly(Invoke([this, global_image_ids, peer_ack_ctxs](std::string global_image_id, Context* ctx) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
global_image_ids->emplace(global_image_id);
peer_ack_ctxs->insert({global_image_id, ctx});
++m_notify_update_count;
- m_cond.Signal();
+ m_cond.notify_all();
}));
}
}
bool wait_for_listener_notify(uint32_t count) {
- Mutex::Locker locker(m_lock);
+ std::unique_lock locker{m_lock};
while (m_notify_update_count < count) {
- if (m_cond.WaitInterval(m_lock, utime_t(10, 0)) != 0) {
+ if (m_cond.wait_for(locker, 10s) == std::cv_status::timeout) {
break;
}
}
}
bool wait_for_map_update(uint32_t count) {
- Mutex::Locker locker(m_lock);
+ std::unique_lock locker{m_lock};
while (m_map_update_count < count) {
- if (m_cond.WaitInterval(m_lock, utime_t(10, 0)) != 0) {
+ if (m_cond.wait_for(locker, 10s) == std::cv_status::timeout) {
break;
}
}
}
}
- Mutex m_lock;
- Cond m_cond;
- uint32_t m_notify_update_count;
- uint32_t m_map_update_count;
+ ceph::mutex m_lock = ceph::make_mutex("TestMockImageMap::m_lock");
+ ceph::condition_variable m_cond;
+ uint32_t m_notify_update_count = 0;
+ uint32_t m_map_update_count = 0;
std::string m_local_instance_id;
};
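The constructor deletions above all follow from one property: ceph::make_mutex()
returns the mutex by value (C++17 guaranteed copy elision makes that legal even
for a non-movable type), so named mutexes can be initialized in-class and the
hand-written constructors collapse to "= default". A stand-alone illustration;
the ceph:: definitions below are simplified release-build stand-ins and
MyFixture is a hypothetical name:

    #include <cstdint>
    #include <mutex>
    #include <string>

    // Release-build stand-ins: ceph::mutex is std::mutex and the
    // lockdep name passed to make_mutex() is simply discarded.
    namespace ceph {
    using mutex = std::mutex;
    inline mutex make_mutex(const std::string&) { return {}; }
    }

    struct MyFixture {
      // In-class initializers replace the old Mutex("name") member ctor...
      ceph::mutex m_lock = ceph::make_mutex("MyFixture::m_lock");
      uint32_t m_update_count = 0;

      // ...so no user-written constructor body is needed any more.
      MyFixture() = default;
    };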
template <>
struct Threads<librbd::MockTestImageCtx> {
MockSafeTimer *timer;
- Mutex &timer_lock;
+ ceph::mutex &timer_lock;
MockContextWQ *work_queue;
template <>
struct Threads<librbd::MockTestImageCtx> {
MockSafeTimer *timer;
- Mutex &timer_lock;
- Cond timer_cond;
+ ceph::mutex &timer_lock;
+ ceph::condition_variable timer_cond;
MockContextWQ *work_queue;
EXPECT_CALL(*mock_threads.timer, add_event_after(_, _))
.WillOnce(DoAll(
WithArg<1>(Invoke([this, &mock_threads, timer_ctx](Context *ctx) {
- ceph_assert(mock_threads.timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(mock_threads.timer_lock));
if (timer_ctx != nullptr) {
*timer_ctx = ctx;
- mock_threads.timer_cond.SignalOne();
+ mock_threads.timer_cond.notify_one();
} else {
m_threads->work_queue->queue(
new FunctionContext([&mock_threads, ctx](int) {
- Mutex::Locker timer_lock(mock_threads.timer_lock);
+ std::lock_guard timer_lock{mock_threads.timer_lock};
ctx->complete(0);
}), 0);
}
ASSERT_TRUE(timer_ctx1 != nullptr);
{
- Mutex::Locker timer_locker(mock_threads.timer_lock);
+ std::lock_guard timer_locker{mock_threads.timer_lock};
timer_ctx1->complete(0);
}
template <>
struct Threads<librbd::MockTestImageCtx> {
- Mutex &timer_lock;
+ ceph::mutex &timer_lock;
SafeTimer *timer;
ContextWQ *work_queue;
const std::string& oid, librbd::Watcher *watcher,
managed_lock::Mode mode, bool blacklist_on_break_lock,
uint32_t blacklist_expire_seconds)
- : m_work_queue(work_queue), m_lock("ManagedLock::m_lock") {
+ : m_work_queue(work_queue) {
MockManagedLock::get_instance().construct();
}
ContextWQ *m_work_queue;
- mutable Mutex m_lock;
+ mutable ceph::mutex m_lock = ceph::make_mutex("ManagedLock::m_lock");
bool is_lock_owner() const {
return MockManagedLock::get_instance().is_lock_owner();
template <>
struct Threads<librbd::MockTestImageCtx> {
- Mutex &timer_lock;
+ ceph::mutex &timer_lock;
SafeTimer *timer;
ContextWQ *work_queue;
template <>
struct Threads<librbd::MockTestImageCtx> {
MockSafeTimer *timer;
- Mutex &timer_lock;
- Cond timer_cond;
+ ceph::mutex &timer_lock;
+ ceph::condition_variable timer_cond;
MockContextWQ *work_queue;
template <>
struct Threads<librbd::MockTestImageCtx> {
MockSafeTimer *timer;
- Mutex &timer_lock;
+ ceph::mutex &timer_lock;
MockContextWQ *work_queue;
}
};
- TestMockPoolWatcher() : m_lock("TestMockPoolWatcher::m_lock") {
- }
+ TestMockPoolWatcher() = default;
void expect_work_queue(MockThreads &mock_threads) {
EXPECT_CALL(*mock_threads.work_queue, queue(_, _))
EXPECT_CALL(mock_listener, mock_handle_update(mirror_uuid, added_image_ids,
removed_image_ids))
.WillOnce(WithoutArgs(Invoke([this]() {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
++m_update_count;
- m_cond.Signal();
+ m_cond.notify_all();
})));
}
.WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) {
auto wrapped_ctx =
new FunctionContext([this, ctx](int r) {
- Mutex::Locker timer_locker(m_threads->timer_lock);
+ std::lock_guard timer_locker{m_threads->timer_lock};
ctx->complete(r);
});
m_threads->work_queue->queue(wrapped_ctx, 0);
}
bool wait_for_update(uint32_t count) {
- Mutex::Locker locker(m_lock);
- while (m_update_count < count) {
- if (m_cond.WaitInterval(m_lock, utime_t(10, 0)) != 0) {
- break;
- }
- }
- if (m_update_count < count) {
+ std::unique_lock locker{m_lock};
+ if (m_cond.wait_for(locker, 10s,
+ [count, this] { return m_update_count >= count; })) {
+ m_update_count -= count;
+ return true;
+ } else {
return false;
}
-
- m_update_count -= count;
- return true;
}
- Mutex m_lock;
- Cond m_cond;
+ ceph::mutex m_lock = ceph::make_mutex("TestMockPoolWatcher::m_lock");
+ ceph::condition_variable m_cond;
uint32_t m_update_count = 0;
};
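wait_for_update() above is one of the few places where the conversion is more
than a mechanical swap: the manual wait loop plus post-loop recheck folds into
the predicate overload of wait_for(), which re-evaluates the condition on every
wakeup and returns whether the predicate finally held, rather than a cv_status.
The same refactor in isolation (std types standing in for the ceph:: aliases):

    #include <chrono>
    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    using namespace std::chrono_literals;

    struct UpdateCounter {
      std::mutex m_lock;
      std::condition_variable m_cond;
      uint32_t m_update_count = 0;

      void bump() {
        {
          std::lock_guard locker{m_lock};
          ++m_update_count;
        }
        m_cond.notify_all();
      }

      // Wait up to 10s for 'count' pending updates; consume them on success.
      bool wait_for_update(uint32_t count) {
        std::unique_lock locker{m_lock};
        if (m_cond.wait_for(locker, 10s,
                            [count, this] { return m_update_count >= count; })) {
          m_update_count -= count;
          return true;
        }
        return false;
      }
    };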
&refresh_sent]() {
*mock_refresh_images_request.image_ids = image_ids;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
refresh_sent = true;
- m_cond.Signal();
+ m_cond.notify_all();
}));
expect_mirror_uuid_get(m_remote_io_ctx, "remote uuid", 0);
mock_pool_watcher.init(nullptr);
{
- Mutex::Locker locker(m_lock);
- while (!refresh_sent) {
- m_cond.Wait(m_lock);
- }
+ std::unique_lock locker{m_lock};
+ m_cond.wait(locker, [&] { return refresh_sent; });
}
MirroringWatcher::get_instance().handle_image_updated(
Context *notify_ctx = nullptr;
EXPECT_CALL(*mock_threads.work_queue, queue(_, _))
    .WillOnce(Invoke([this, &notify_ctx](Context *ctx, int r) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ASSERT_EQ(nullptr, notify_ctx);
notify_ctx = ctx;
- m_cond.Signal();
+ m_cond.notify_all();
}));
expect_listener_handle_update(
mock_listener, "remote uuid",
namespace rbd {
namespace mirror {
-ClusterWatcher::ClusterWatcher(RadosRef cluster, Mutex &lock,
+ClusterWatcher::ClusterWatcher(RadosRef cluster, ceph::mutex &lock,
ServiceDaemon<librbd::ImageCtx>* service_daemon)
: m_cluster(cluster), m_lock(lock), m_service_daemon(service_daemon)
{
const ClusterWatcher::PoolPeers& ClusterWatcher::get_pool_peers() const
{
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
return m_pool_peers;
}
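std::mutex has no is_locked() query, so the bare m_lock.is_locked() assertions
become ceph_mutex_is_locked(m_lock): with the debug ceph::mutex the macro
consults the lock-tracking implementation, and in release builds it degrades to
a constant true so the surrounding ceph_assert compiles away. An illustrative
approximation of that mechanism (names here are hypothetical, not the Ceph
implementation):

    #include <cassert>
    #include <mutex>

    // Debug mutex that tracks lock state purely for assertions.
    class checked_mutex {
      std::mutex m;
      bool locked = false;
    public:
      void lock() { m.lock(); locked = true; }
      void unlock() { locked = false; m.unlock(); }
      bool is_locked() const { return locked; }
    };

    #ifdef NDEBUG
    #define my_mutex_is_locked(m) true   // release: check compiles away
    #else
    #define my_mutex_is_locked(m) ((m).is_locked())
    #endif

    void requires_lock_held(checked_mutex& m) {
      assert(my_mutex_is_locked(m));     // as in ceph_assert(ceph_mutex_is_locked(...))
    }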
PoolPeers pool_peers;
read_pool_peers(&pool_peers);
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
m_pool_peers = pool_peers;
// TODO: perhaps use a workqueue instead, once we get notifications
// about config changes for existing pools
#include <set>
#include "common/ceph_context.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "include/rados/librados.hpp"
#include "tools/rbd_mirror/Types.h"
typedef std::set<PeerSpec, PeerSpecCompare> Peers;
typedef std::map<int64_t, Peers> PoolPeers;
- ClusterWatcher(RadosRef cluster, Mutex &lock,
+ ClusterWatcher(RadosRef cluster, ceph::mutex &lock,
ServiceDaemon<librbd::ImageCtx>* service_daemon);
~ClusterWatcher() = default;
ClusterWatcher(const ClusterWatcher&) = delete;
typedef std::unordered_map<int64_t, service_daemon::CalloutId> ServicePools;
RadosRef m_cluster;
- Mutex &m_lock;
+ ceph::mutex &m_lock;
ServiceDaemon<librbd::ImageCtx>* m_service_daemon;
ServicePools m_service_pools;
ServiceDaemon<librbd::ImageCtx>* service_daemon)
: m_local_io_ctx(local_io_ctx), m_threads(threads),
m_service_daemon(service_daemon), m_trash_listener(this),
- m_lock(librbd::util::unique_lock_name("rbd::mirror::ImageDeleter::m_lock",
- this)) {
+ m_lock(ceph::make_mutex(
+ librbd::util::unique_lock_name("rbd::mirror::ImageDeleter::m_lock",
+ this))) {
}
#undef dout_prefix
template <typename I>
void ImageDeleter<I>::wait_for_ops(Context* on_finish) {
{
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
m_running = false;
cancel_retry_timer();
}
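Wherever the old code stacked a timer-lock Locker on top of the state-lock
Locker, the conversion collapses the pair into a single std::scoped_lock, which
acquires both mutexes via std::lock()'s deadlock-avoidance algorithm instead of
relying on a fixed acquisition order. Sketch:

    #include <mutex>

    std::mutex timer_lock;   // stand-ins for m_threads->timer_lock / m_lock
    std::mutex m_lock;

    void shut_down_step() {
      // One statement replaces the two nested Mutex::Locker guards; both
      // mutexes are released in reverse order at scope exit.
      std::scoped_lock locker{timer_lock, m_lock};
      // ... state guarded by both locks ...
    }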
template <typename I>
void ImageDeleter<I>::cancel_all_deletions(Context* on_finish) {
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
// wake up any external state machines waiting on deletions
ceph_assert(m_in_flight_delete_queue.empty());
for (auto& queue : {&m_delete_queue, &m_retry_delete_queue}) {
m_threads->work_queue->queue(on_finish, r);
});
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto del_info = find_delete_info(image_id);
if (!del_info && scheduled_only) {
// image not scheduled for deletion
void ImageDeleter<I>::complete_active_delete(DeleteInfoRef* delete_info,
int r) {
dout(20) << "info=" << *delete_info << ", r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
notify_on_delete((*delete_info)->image_id, r);
delete_info->reset();
}
double retry_delay) {
dout(20) << "info=" << *delete_info << ", r=" << error_code << dendl;
if (error_code == -EBLACKLISTED) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
derr << "blacklisted while deleting local image" << dendl;
complete_active_delete(delete_info, error_code);
return;
}
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
auto& delete_info_ref = *delete_info;
notify_on_delete(delete_info_ref->image_id, error_code);
delete_info_ref->error_code = error_code;
++delete_info_ref->retries;
- delete_info_ref->retry_time = ceph_clock_now();
- delete_info_ref->retry_time += retry_delay;
+ delete_info_ref->retry_time = (clock_t::now() +
+ ceph::make_timespan(retry_delay));
m_retry_delete_queue.push_back(delete_info_ref);
schedule_retry_timer();
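retry_time also changes type in this hunk, from utime_t to a chrono time point:
ceph::real_clock is effectively a wrapper over the system clock, and
ceph::make_timespan() turns the seconds-as-double delay into a duration so that
now() + timespan type-checks. The same arithmetic in plain chrono terms, with
std::chrono::system_clock and a hand-rolled make_timespan standing in for the
ceph:: helpers:

    #include <chrono>

    using real_clock = std::chrono::system_clock;

    // Rough equivalent of ceph::make_timespan(double seconds).
    inline std::chrono::duration<double> make_timespan(double seconds) {
      return std::chrono::duration<double>(seconds);
    }

    real_clock::time_point compute_retry_time(double retry_delay) {
      // Old utime_t form: retry_time = ceph_clock_now(); retry_time += delay;
      // New chrono form: a single typed expression.
      return real_clock::now() +
             std::chrono::duration_cast<real_clock::duration>(
                 make_timespan(retry_delay));
    }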
template <typename I>
typename ImageDeleter<I>::DeleteInfoRef
ImageDeleter<I>::find_delete_info(const std::string &image_id) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
DeleteQueue delete_queues[] = {m_in_flight_delete_queue,
m_retry_delete_queue,
m_delete_queue};
f->open_array_section("delete_images_queue");
}
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
for (const auto& image : m_delete_queue) {
image->print_status(f, ss);
}
vector<string> ImageDeleter<I>::get_delete_queue_items() {
vector<string> items;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
for (const auto& del_info : m_delete_queue) {
items.push_back(del_info->image_id);
}
vector<pair<string, int> > ImageDeleter<I>::get_failed_queue_items() {
vector<pair<string, int> > items;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
for (const auto& del_info : m_retry_delete_queue) {
items.push_back(make_pair(del_info->image_id,
del_info->error_code));
uint64_t max_concurrent_deletions = cct->_conf.get_val<uint64_t>(
"rbd_mirror_concurrent_image_deletions");
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
while (true) {
if (!m_running || m_delete_queue.empty() ||
m_in_flight_delete_queue.size() >= max_concurrent_deletions) {
template <typename I>
void ImageDeleter<I>::remove_image(DeleteInfoRef delete_info) {
dout(10) << "info=" << *delete_info << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
m_in_flight_delete_queue.push_back(delete_info);
m_async_op_tracker.start_op();
dout(10) << "info=" << *delete_info << ", r=" << r << dendl;
{
- Mutex::Locker locker(m_lock);
- ceph_assert(m_lock.is_locked());
+ std::lock_guard locker{m_lock};
+ ceph_assert(ceph_mutex_is_locked(m_lock));
auto it = std::find(m_in_flight_delete_queue.begin(),
m_in_flight_delete_queue.end(), delete_info);
ceph_assert(it != m_in_flight_delete_queue.end());
template <typename I>
void ImageDeleter<I>::schedule_retry_timer() {
- ceph_assert(m_threads->timer_lock.is_locked());
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ ceph_assert(ceph_mutex_is_locked(m_lock));
if (!m_running || m_timer_ctx != nullptr || m_retry_delete_queue.empty()) {
return;
}
template <typename I>
void ImageDeleter<I>::cancel_retry_timer() {
dout(10) << dendl;
- ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
if (m_timer_ctx != nullptr) {
bool canceled = m_threads->timer->cancel_event(m_timer_ctx);
m_timer_ctx = nullptr;
template <typename I>
void ImageDeleter<I>::handle_retry_timer() {
dout(10) << dendl;
- ceph_assert(m_threads->timer_lock.is_locked());
- Mutex::Locker locker(m_lock);
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ std::lock_guard locker{m_lock};
ceph_assert(m_timer_ctx != nullptr);
m_timer_ctx = nullptr;
ceph_assert(!m_retry_delete_queue.empty());
  // move all ready-to-retry items back to the main queue
- utime_t now = ceph_clock_now();
+ auto now = clock_t::now();
while (!m_retry_delete_queue.empty()) {
auto &delete_info = m_retry_delete_queue.front();
if (delete_info->retry_time > now) {
template <typename I>
void ImageDeleter<I>::handle_trash_image(const std::string& image_id,
- const utime_t& deferment_end_time) {
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ const ImageDeleter<I>::clock_t::time_point& deferment_end_time) {
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
auto del_info = find_delete_info(image_id);
if (del_info != nullptr) {
}
dout(10) << "image_id=" << image_id << ", "
- << "deferment_end_time=" << deferment_end_time << dendl;
+ << "deferment_end_time=" << utime_t{deferment_end_time} << dendl;
del_info.reset(new DeleteInfo(image_id));
del_info->retry_time = deferment_end_time;
#include "include/utime.h"
#include "common/AsyncOpTracker.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "tools/rbd_mirror/Types.h"
#include "tools/rbd_mirror/image_deleter/Types.h"
#include <atomic>
}
private:
+ using clock_t = ceph::real_clock;
struct TrashListener : public image_deleter::TrashListener {
ImageDeleter *image_deleter;
}
void handle_trash_image(const std::string& image_id,
- const utime_t& deferment_end_time) override {
+ const ceph::real_clock::time_point& deferment_end_time) override {
image_deleter->handle_trash_image(image_id, deferment_end_time);
}
};
image_deleter::ErrorResult error_result = {};
int error_code = 0;
- utime_t retry_time = {};
+ clock_t::time_point retry_time;
int retries = 0;
DeleteInfo(const std::string& image_id)
AsyncOpTracker m_async_op_tracker;
- Mutex m_lock;
+ ceph::mutex m_lock;
DeleteQueue m_delete_queue;
DeleteQueue m_retry_delete_queue;
DeleteQueue m_in_flight_delete_queue;
void handle_retry_timer();
void handle_trash_image(const std::string& image_id,
- const utime_t& deferment_end_time);
+ const clock_t::time_point& deferment_end_time);
void shut_down_trash_watcher(Context* on_finish);
void wait_for_ops(Context* on_finish);
image_map::Listener &listener)
: m_ioctx(ioctx), m_threads(threads), m_instance_id(instance_id),
m_listener(listener),
- m_lock(unique_lock_name("rbd::mirror::ImageMap::m_lock", this)) {
+ m_lock(ceph::make_mutex(
+ unique_lock_name("rbd::mirror::ImageMap::m_lock", this))) {
}
template <typename I>
dout(20) << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_shutting_down) {
return;
}
void ImageMap<I>::process_updates() {
dout(20) << dendl;
- ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
ceph_assert(m_timer_task == nullptr);
Updates map_updates;
Updates release_updates;
// gather updates by advancing the state machine
- m_lock.Lock();
+ m_lock.lock();
for (auto const &global_image_id : m_global_image_ids) {
image_map::ActionType action_type =
m_policy->start_action(global_image_id);
}
}
m_global_image_ids.clear();
- m_lock.Unlock();
+ m_lock.unlock();
// notify listener (acquire, release) and update on-disk map. note
  // that it's safe to process this outside m_lock as we still hold
template <typename I>
void ImageMap<I>::schedule_update_task() {
- Mutex::Locker timer_lock(m_threads->timer_lock);
+ std::lock_guard timer_lock{m_threads->timer_lock};
schedule_update_task(m_threads->timer_lock);
}
template <typename I>
-void ImageMap<I>::schedule_update_task(const Mutex &timer_lock) {
- ceph_assert(m_threads->timer_lock.is_locked());
+void ImageMap<I>::schedule_update_task(const ceph::mutex &timer_lock) {
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
schedule_rebalance_task();
}
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_global_image_ids.empty()) {
return;
}
}
m_timer_task = new FunctionContext([this](int r) {
- ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
m_timer_task = nullptr;
process_updates();
ceph_assert(m_rebalance_task == nullptr);
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_async_op_tracker.empty() && m_global_image_ids.empty()){
dout(20) << "starting rebalance" << dendl;
template <typename I>
void ImageMap<I>::schedule_rebalance_task() {
- ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
}
m_rebalance_task = new FunctionContext([this](int _) {
- ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
m_rebalance_task = nullptr;
rebalance();
template <typename I>
void ImageMap<I>::schedule_action(const std::string &global_image_id) {
dout(20) << "global_image_id=" << global_image_id << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
m_global_image_ids.emplace(global_image_id);
}
dout(20) << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_policy->init(image_mapping);
for (auto& pair : image_mapping) {
template <typename I>
void ImageMap<I>::handle_peer_ack_remove(const std::string &global_image_id,
int r) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
dout(5) << "global_image_id=" << global_image_id << dendl;
if (r < 0) {
const std::set<std::string> &global_image_ids) {
dout(5) << "peer_uuid=" << peer_uuid << ", "
<< "global_image_ids=[" << global_image_ids << "]" << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
for (auto const &global_image_id : global_image_ids) {
auto result = m_peer_map[global_image_id].insert(peer_uuid);
const std::set<std::string> &global_image_ids) {
dout(5) << "peer_uuid=" << peer_uuid << ", "
<< "global_image_ids=[" << global_image_ids << "]" << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Updates to_remove;
for (auto const &global_image_id : global_image_ids) {
void ImageMap<I>::update_instances_added(
const std::vector<std::string> &instance_ids) {
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_shutting_down) {
return;
}
void ImageMap<I>::update_instances_removed(
const std::vector<std::string> &instance_ids) {
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_shutting_down) {
return;
}
<< removed_global_image_ids.size() << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_shutting_down) {
return;
}
dout(20) << dendl;
{
- Mutex::Locker timer_lock(m_threads->timer_lock);
+ std::lock_guard timer_lock{m_threads->timer_lock};
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(!m_shutting_down);
m_shutting_down = true;
#include <vector>
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "common/AsyncOpTracker.h"
#include "cls/rbd/cls_rbd_types.h"
std::unique_ptr<image_map::Policy> m_policy; // our mapping policy
Context *m_timer_task = nullptr;
- Mutex m_lock;
+ ceph::mutex m_lock;
bool m_shutting_down = false;
AsyncOpTracker m_async_op_tracker;
void schedule_action(const std::string &global_image_id);
void schedule_update_task();
- void schedule_update_task(const Mutex &timer_lock);
+ void schedule_update_task(const ceph::mutex &timer_lock);
void process_updates();
void update_image_mapping(Updates&& map_updates,
std::set<std::string>&& map_removals);
m_local_mirror_uuid(local_mirror_uuid),
m_local_pool_id(local_pool_id),
m_global_image_id(global_image_id), m_local_image_name(global_image_id),
- m_lock("rbd::mirror::ImageReplayer " + stringify(local_pool_id) + " " +
- global_image_id),
+    m_lock(ceph::make_mutex("rbd::mirror::ImageReplayer " +
+           stringify(local_pool_id) + " " + global_image_id)),
m_progress_cxt(this),
m_journal_listener(new JournalListener(this)),
m_remote_listener(this)
template <typename I>
image_replayer::HealthState ImageReplayer<I>::get_health_state() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (!m_mirror_image_status_state) {
return image_replayer::HEALTH_STATE_OK;
template <typename I>
void ImageReplayer<I>::add_peer(const std::string &peer_uuid,
librados::IoCtx &io_ctx) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto it = m_peers.find({peer_uuid});
if (it == m_peers.end()) {
m_peers.insert({peer_uuid, io_ctx});
void ImageReplayer<I>::set_state_description(int r, const std::string &desc) {
dout(10) << r << " " << desc << dendl;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
m_last_r = r;
m_state_desc = desc;
}
int r = 0;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (!is_stopped_()) {
derr << "already running" << dendl;
r = -EINVAL;
BootstrapRequest<I> *request = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (on_start_interrupted(m_lock)) {
return;
}
void ImageReplayer<I>::handle_bootstrap(int r) {
dout(10) << "r=" << r << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_bootstrap_request->put();
m_bootstrap_request = nullptr;
if (m_local_image_ctx) {
ceph_assert(m_local_journal == nullptr);
{
- RWLock::RLocker image_locker(m_local_image_ctx->image_lock);
+ std::shared_lock image_locker{m_local_image_ctx->image_lock};
if (m_local_image_ctx->journal != nullptr) {
m_local_journal = m_local_image_ctx->journal;
m_local_journal->add_listener(m_journal_listener);
Context *on_finish(nullptr);
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_STARTING);
m_state = STATE_REPLAYING;
std::swap(m_on_start_finish, on_finish);
double poll_seconds = cct->_conf.get_val<double>(
"rbd_mirror_journal_poll_age");
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_replay_handler = new ReplayHandler<I>(this);
m_remote_journaler->start_live_replay(m_replay_handler, poll_seconds);
dout(10) << "r=" << r << dendl;
Context *ctx = new FunctionContext([this, r, desc](int _r) {
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_STARTING);
m_state = STATE_STOPPING;
if (r < 0 && r != -ECANCELED && r != -EREMOTEIO && r != -ENOENT) {
template <typename I>
bool ImageReplayer<I>::on_start_interrupted() {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return on_start_interrupted(m_lock);
}
template <typename I>
-bool ImageReplayer<I>::on_start_interrupted(Mutex& lock) {
- ceph_assert(m_lock.is_locked());
+bool ImageReplayer<I>::on_start_interrupted(ceph::mutex& lock) {
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_STARTING);
if (!m_stop_requested) {
return false;
bool shut_down_replay = false;
bool running = true;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (!is_running_()) {
running = false;
dout(10) << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_state != STATE_REPLAYING) {
// might be invoked multiple times while stopping
return;
m_event_replay_tracker.start_op();
- m_lock.Lock();
+ m_lock.lock();
bool stopping = (m_state == STATE_STOPPING);
- m_lock.Unlock();
+ m_lock.unlock();
if (stopping) {
dout(10) << "stopping event replay" << dendl;
template <typename I>
void ImageReplayer<I>::flush_local_replay(Context* on_flush)
{
- m_lock.Lock();
+ m_lock.lock();
if (m_state != STATE_REPLAYING) {
- m_lock.Unlock();
+ m_lock.unlock();
on_flush->complete(0);
return;
}
handle_flush_local_replay(on_flush, r);
});
m_local_replay->flush(ctx);
- m_lock.Unlock();
+ m_lock.unlock();
}
template <typename I>
template <typename I>
void ImageReplayer<I>::flush_commit_position(Context* on_flush)
{
- m_lock.Lock();
+ m_lock.lock();
if (m_state != STATE_REPLAYING) {
- m_lock.Unlock();
+ m_lock.unlock();
on_flush->complete(0);
return;
}
handle_flush_commit_position(on_flush, r);
});
m_remote_journaler->flush_commit_position(ctx);
- m_lock.Unlock();
+ m_lock.unlock();
}
template <typename I>
{
bool shut_down;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
shut_down = m_stop_requested;
}
{
dout(10) << dendl;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (f) {
f->open_object_section("image_replayer");
}
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_stop_requested = true;
}
on_stop_journal_replay(r, error_desc);
bool interrupted = false;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_state != STATE_REPLAYING) {
dout(10) << "replay interrupted" << dendl;
interrupted = true;
dout(10) << "r=" << r << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_REPLAY_FLUSHING);
m_state = STATE_REPLAYING;
}
return;
} else {
dout(5) << "encountered image demotion: stopping" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_stop_requested = true;
}
}
dout(20) << "delaying replay by " << delay << " sec" << dendl;
- Mutex::Locker timer_locker(m_threads->timer_lock);
+ std::lock_guard timer_locker{m_threads->timer_lock};
ceph_assert(m_delayed_preprocess_task == nullptr);
m_delayed_preprocess_task = new FunctionContext(
[this](int r) {
- ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
m_delayed_preprocess_task = nullptr;
m_threads->work_queue->queue(
create_context_callback<ImageReplayer,
bool update_status = false;
{
- RWLock::RLocker image_locker(m_local_image_ctx->image_lock);
+ std::shared_lock image_locker{m_local_image_ctx->image_lock};
if (m_local_image_name != m_local_image_ctx->name) {
m_local_image_name = m_local_image_ctx->name;
update_status = true;
auto ctx = new FunctionContext(
[this, bytes, latency](int r) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_perf_counters) {
m_perf_counters->inc(l_rbd_mirror_replay);
m_perf_counters->inc(l_rbd_mirror_replay_bytes, bytes);
const OptionalState &state) {
dout(15) << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (!start_mirror_image_status_update(force, false)) {
return false;
}
template <typename I>
bool ImageReplayer<I>::start_mirror_image_status_update(bool force,
bool restarting) {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
if (!force && !is_stopped_()) {
if (!is_running_()) {
Context *on_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_in_flight_status_updates > 0);
if (--m_in_flight_status_updates > 0) {
dout(15) << "waiting on " << m_in_flight_status_updates << " in-flight "
boost::make_optional(false, cls::rbd::MIRROR_IMAGE_STATUS_STATE_UNKNOWN);
image_replayer::BootstrapRequest<I>* bootstrap_request = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
state = m_state;
state_desc = m_state_desc;
mirror_image_status_state = m_mirror_image_status_state;
}
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_mirror_image_status_state = mirror_image_status_state;
}
bool running = false;
bool started = false;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
bool update_status_requested = false;
std::swap(update_status_requested, m_update_status_requested);
void ImageReplayer<I>::reschedule_update_status_task(int new_interval) {
bool canceled_task = false;
{
- Mutex::Locker locker(m_lock);
- Mutex::Locker timer_locker(m_threads->timer_lock);
+ std::lock_guard locker{m_lock};
+ std::lock_guard timer_locker{m_threads->timer_lock};
if (m_update_status_task) {
dout(15) << "canceling existing status update task" << dendl;
start_mirror_image_status_update(true, false)) {
m_update_status_task = new FunctionContext(
[this](int r) {
- ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
m_update_status_task = nullptr;
queue_mirror_image_status_update(boost::none);
bool canceled_delayed_preprocess_task = false;
{
- Mutex::Locker timer_locker(m_threads->timer_lock);
+ std::lock_guard timer_locker{m_threads->timer_lock};
if (m_delayed_preprocess_task != nullptr) {
canceled_delayed_preprocess_task = m_threads->timer->cancel_event(
m_delayed_preprocess_task);
reschedule_update_status_task(-1);
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_STOPPING);
// if status updates are in-flight, wait for them to complete
bool delete_requested = false;
bool unregister_asok_hook = false;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
// if status updates are in-flight, wait for them to complete
// before proceeding
Context *on_start = nullptr;
Context *on_stop = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
std::swap(on_start, m_on_start_finish);
std::swap(on_stop, m_on_stop_finish);
m_stop_requested = false;
cls::journal::Client client;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (!is_running_()) {
return;
}
void ImageReplayer<I>::register_admin_socket_hook() {
ImageReplayerAdminSocketHook<I> *asok_hook;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_asok_hook != nullptr) {
return;
}
AdminSocketHook *asok_hook = nullptr;
PerfCounters *perf_counters = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
std::swap(asok_hook, m_asok_hook);
std::swap(perf_counters, m_perf_counters);
}
template <typename I>
void ImageReplayer<I>::reregister_admin_socket_hook() {
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto name = m_local_ioctx->get_pool_name() + "/" + m_local_image_name;
if (m_asok_hook != nullptr && m_name == name) {
return;
#define CEPH_RBD_MIRROR_IMAGE_REPLAYER_H
#include "common/AsyncOpTracker.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "common/WorkQueue.h"
#include "include/rados/librados.hpp"
#include "cls/journal/cls_journal_types.h"
ImageReplayer(const ImageReplayer&) = delete;
ImageReplayer& operator=(const ImageReplayer&) = delete;
- bool is_stopped() { Mutex::Locker l(m_lock); return is_stopped_(); }
- bool is_running() { Mutex::Locker l(m_lock); return is_running_(); }
- bool is_replaying() { Mutex::Locker l(m_lock); return is_replaying_(); }
+ bool is_stopped() { std::lock_guard l{m_lock}; return is_stopped_(); }
+ bool is_running() { std::lock_guard l{m_lock}; return is_running_(); }
+ bool is_replaying() { std::lock_guard l{m_lock}; return is_replaying_(); }
- std::string get_name() { Mutex::Locker l(m_lock); return m_name; };
+ std::string get_name() { std::lock_guard l{m_lock}; return m_name; };
void set_state_description(int r, const std::string &desc);
// TODO temporary until policy handles release of image replayers
inline bool is_finished() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return m_finished;
}
inline void set_finished(bool finished) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_finished = finished;
}
inline bool is_blacklisted() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return (m_last_r == -EBLACKLISTED);
}
virtual void on_start_fail(int r, const std::string &desc);
virtual bool on_start_interrupted();
- virtual bool on_start_interrupted(Mutex& lock);
+ virtual bool on_start_interrupted(ceph::mutex& lock);
virtual void on_stop_journal_replay(int r = 0, const std::string &desc = "");
std::string m_local_image_name;
std::string m_name;
- mutable Mutex m_lock;
+ mutable ceph::mutex m_lock;
State m_state = STATE_STOPPED;
std::string m_state_desc;
template <typename I>
ImageSync<I>::ImageSync(I *local_image_ctx, I *remote_image_ctx,
- SafeTimer *timer, Mutex *timer_lock,
+ SafeTimer *timer, ceph::mutex *timer_lock,
const std::string &mirror_uuid, Journaler *journaler,
MirrorPeerClientMeta *client_meta,
ContextWQ *work_queue,
m_journaler(journaler), m_client_meta(client_meta),
m_work_queue(work_queue), m_instance_watcher(instance_watcher),
m_progress_ctx(progress_ctx),
- m_lock(unique_lock_name("ImageSync::m_lock", this)),
+ m_lock(ceph::make_mutex(unique_lock_name("ImageSync::m_lock", this))),
m_update_sync_point_interval(m_local_image_ctx->cct->_conf.template get_val<double>(
"rbd_mirror_sync_point_update_age")), m_client_meta_copy(*client_meta) {
}
template <typename I>
void ImageSync<I>::cancel() {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
dout(10) << dendl;
dout(10) << dendl;
- m_lock.Lock();
+ m_lock.lock();
if (m_canceled) {
- m_lock.Unlock();
+ m_lock.unlock();
BaseRequest::finish(-ECANCELED);
return;
}
m_work_queue, create_context_callback<
ImageSync<I>, &ImageSync<I>::handle_notify_sync_request>(this));
m_instance_watcher->notify_sync_request(m_local_image_ctx->id, ctx);
- m_lock.Unlock();
+ m_lock.unlock();
}
template <typename I>
void ImageSync<I>::handle_notify_sync_request(int r) {
dout(10) << ": r=" << r << dendl;
- m_lock.Lock();
+ m_lock.lock();
if (r == 0 && m_canceled) {
r = -ECANCELED;
}
- m_lock.Unlock();
+ m_lock.unlock();
if (r < 0) {
BaseRequest::finish(r);
librbd::deep_copy::ObjectNumber object_number;
int r = 0;
{
- RWLock::RLocker image_locker(m_remote_image_ctx->image_lock);
+ std::shared_lock image_locker{m_remote_image_ctx->image_lock};
ceph_assert(!m_client_meta->sync_points.empty());
auto &sync_point = m_client_meta->sync_points.front();
snap_id_end = m_remote_image_ctx->get_snap_id(
return;
}
- m_lock.Lock();
+ m_lock.lock();
if (m_canceled) {
- m_lock.Unlock();
+ m_lock.unlock();
finish(-ECANCELED);
return;
}
false, object_number, m_work_queue, &m_client_meta->snap_seqs,
m_image_copy_prog_ctx, ctx);
m_image_copy_request->get();
- m_lock.Unlock();
+ m_lock.unlock();
update_progress("COPY_IMAGE");
dout(10) << ": r=" << r << dendl;
{
- Mutex::Locker timer_locker(*m_timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{*m_timer_lock, m_lock};
m_image_copy_request->put();
m_image_copy_request = nullptr;
delete m_image_copy_prog_ctx;
int percent = 100 * object_no / object_count;
update_progress("COPY_IMAGE " + stringify(percent) + "%");
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_image_copy_object_no = object_no;
m_image_copy_object_count = object_count;
template <typename I>
void ImageSync<I>::send_update_sync_point() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
m_update_sync_ctx = nullptr;
}
{
- Mutex::Locker timer_locker(*m_timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{*m_timer_lock, m_lock};
m_updating_sync_point = false;
if (m_image_copy_request != nullptr) {
m_update_sync_ctx = new FunctionContext(
[this](int r) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
this->send_update_sync_point();
});
m_timer->add_event_after(m_update_sync_point_interval,
#include "librbd/ImageCtx.h"
#include "librbd/journal/TypeTraits.h"
#include "librbd/journal/Types.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "tools/rbd_mirror/BaseRequest.h"
#include <map>
#include <vector>
static ImageSync* create(ImageCtxT *local_image_ctx,
ImageCtxT *remote_image_ctx,
- SafeTimer *timer, Mutex *timer_lock,
+ SafeTimer *timer, ceph::mutex *timer_lock,
const std::string &mirror_uuid,
Journaler *journaler,
MirrorPeerClientMeta *client_meta,
}
ImageSync(ImageCtxT *local_image_ctx, ImageCtxT *remote_image_ctx,
- SafeTimer *timer, Mutex *timer_lock, const std::string &mirror_uuid,
+ SafeTimer *timer, ceph::mutex *timer_lock, const std::string &mirror_uuid,
Journaler *journaler, MirrorPeerClientMeta *client_meta,
ContextWQ *work_queue, InstanceWatcher<ImageCtxT> *instance_watcher,
Context *on_finish, ProgressContext *progress_ctx = nullptr);
ImageCtxT *m_local_image_ctx;
ImageCtxT *m_remote_image_ctx;
SafeTimer *m_timer;
- Mutex *m_timer_lock;
+ ceph::mutex *m_timer_lock;
std::string m_mirror_uuid;
Journaler *m_journaler;
MirrorPeerClientMeta *m_client_meta;
SnapMap m_snap_map;
- Mutex m_lock;
+ ceph::mutex m_lock;
bool m_canceled = false;
librbd::DeepCopyRequest<ImageCtxT> *m_image_copy_request = nullptr;
template <typename I>
ImageSyncThrottler<I>::ImageSyncThrottler(CephContext *cct)
: m_cct(cct),
- m_lock(librbd::util::unique_lock_name("rbd::mirror::ImageSyncThrottler",
- this)),
+ m_lock(ceph::make_mutex(
+ librbd::util::unique_lock_name("rbd::mirror::ImageSyncThrottler",
+ this))),
m_max_concurrent_syncs(cct->_conf.get_val<uint64_t>(
"rbd_mirror_concurrent_image_syncs")) {
dout(20) << "max_concurrent_syncs=" << m_max_concurrent_syncs << dendl;
ImageSyncThrottler<I>::~ImageSyncThrottler() {
m_cct->_conf.remove_observer(this);
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_inflight_ops.empty());
ceph_assert(m_queue.empty());
}
int r = 0;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_inflight_ops.count(id) > 0) {
dout(20) << "duplicate for already started op " << id << dendl;
Context *on_start = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto it = m_queued_ops.find(id);
if (it != m_queued_ops.end()) {
dout(20) << "canceled queued sync for " << id << dendl;
Context *on_start = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_inflight_ops.erase(id);
std::map<std::string, Context *> queued_ops;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
std::swap(m_queued_ops, queued_ops);
m_queue.clear();
m_inflight_ops.clear();
std::list<Context *> ops;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_max_concurrent_syncs = max;
// Start waiting ops in the case of available free slots
}
template <typename I>
-void ImageSyncThrottler<I>::print_status(Formatter *f, std::stringstream *ss) {
+void ImageSyncThrottler<I>::print_status(ceph::Formatter *f,
+                                         std::stringstream *ss) {
dout(20) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (f) {
f->dump_int("max_parallel_syncs", m_max_concurrent_syncs);
#include <string>
#include <utility>
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "common/config_obs.h"
class CephContext;
void finish_op(const std::string &id);
void drain(int r);
- void print_status(Formatter *f, std::stringstream *ss);
+ void print_status(ceph::Formatter *f, std::stringstream *ss);
private:
CephContext *m_cct;
- Mutex m_lock;
+ ceph::mutex m_lock;
uint32_t m_max_concurrent_syncs;
std::list<std::string> m_queue;
std::map<std::string, Context *> m_queued_ops;
// vim: ts=8 sw=2 smarttab
#include "include/stringify.h"
+#include "common/Cond.h"
#include "common/Timer.h"
#include "common/debug.h"
#include "common/errno.h"
: m_threads(threads), m_service_daemon(service_daemon),
m_cache_manager_handler(cache_manager_handler), m_local_rados(local_rados),
m_local_mirror_uuid(local_mirror_uuid), m_local_pool_id(local_pool_id),
- m_lock("rbd::mirror::InstanceReplayer " + stringify(local_pool_id)) {
+ m_lock(ceph::make_mutex(
+ "rbd::mirror::InstanceReplayer " + stringify(local_pool_id))) {
}
template <typename I>
Context *ctx = new FunctionContext(
[this, on_finish] (int r) {
{
- Mutex::Locker timer_locker(m_threads->timer_lock);
+ std::lock_guard timer_locker{m_threads->timer_lock};
schedule_image_state_check_task();
}
on_finish->complete(0);
void InstanceReplayer<I>::shut_down(Context *on_finish) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_shut_down == nullptr);
m_on_shut_down = on_finish;
librados::IoCtx io_ctx) {
dout(10) << peer_uuid << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto result = m_peers.insert(Peer(peer_uuid, io_ctx)).second;
ceph_assert(result);
}
void InstanceReplayer<I>::release_all(Context *on_finish) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
C_Gather *gather_ctx = new C_Gather(g_ceph_context, on_finish);
for (auto it = m_image_replayers.begin(); it != m_image_replayers.end();
Context *on_finish) {
dout(10) << "global_image_id=" << global_image_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_shut_down == nullptr);
Context *on_finish) {
dout(10) << "global_image_id=" << global_image_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_shut_down == nullptr);
auto it = m_image_replayers.find(global_image_id);
dout(10) << "global_image_id=" << global_image_id << ", "
<< "peer_mirror_uuid=" << peer_mirror_uuid << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_shut_down == nullptr);
auto it = m_image_replayers.find(global_image_id);
return;
}
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
f->open_array_section("image_replayers");
for (auto &kv : m_image_replayers) {
{
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_manual_stop = false;
{
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_manual_stop = true;
{
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_manual_stop = false;
{
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
for (auto &kv : m_image_replayers) {
auto &image_replayer = kv.second;
template <typename I>
void InstanceReplayer<I>::start_image_replayer(
ImageReplayer<I> *image_replayer) {
- ceph_assert(m_lock.is_locked());
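+ // std::mutex has no is_locked(); ceph_mutex_is_locked() asserts
+ // ownership in debug builds and compiles to true otherwise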
+ ceph_assert(ceph_mutex_is_locked(m_lock));
std::string global_image_id = image_replayer->get_global_image_id();
if (!image_replayer->is_stopped()) {
void InstanceReplayer<I>::start_image_replayers(int r) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_on_shut_down != nullptr) {
return;
}
<< after << " sec (task " << ctx << ")" << dendl;
ctx = new FunctionContext(
[this, after, ctx] (int r) {
- Mutex::Locker timer_locker(m_threads->timer_lock);
+ std::lock_guard timer_locker{m_threads->timer_lock};
m_threads->timer->add_event_after(after, ctx);
});
m_threads->work_queue->queue(ctx, 0);
ceph_assert(r == 0);
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
stop_image_replayers();
}
void InstanceReplayer<I>::stop_image_replayers() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_async_context_callback(
m_threads->work_queue, create_context_callback<InstanceReplayer<I>,
Context *on_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
for (auto &it : m_image_replayers) {
ceph_assert(it.second->is_stopped());
template <typename I>
void InstanceReplayer<I>::cancel_image_state_check_task() {
- Mutex::Locker timer_locker(m_threads->timer_lock);
+ std::lock_guard timer_locker{m_threads->timer_lock};
if (m_image_state_check_task == nullptr) {
return;
template <typename I>
void InstanceReplayer<I>::schedule_image_state_check_task() {
- ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
ceph_assert(m_image_state_check_task == nullptr);
m_image_state_check_task = new FunctionContext(
[this](int r) {
- ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
m_image_state_check_task = nullptr;
schedule_image_state_check_task();
queue_start_image_replayers();
#include "common/AsyncOpTracker.h"
#include "common/Formatter.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "tools/rbd_mirror/Types.h"
namespace journal { struct CacheManagerHandler; }
std::string m_local_mirror_uuid;
int64_t m_local_pool_id;
- Mutex m_lock;
+ ceph::mutex m_lock;
AsyncOpTracker m_async_op_tracker;
std::map<std::string, ImageReplayer<ImageCtxT> *> m_image_replayers;
Peers m_peers;
<< ": instance_watcher=" << instance_watcher << ", instance_id="
<< instance_id << ", request_id=" << request_id << dendl;
- ceph_assert(instance_watcher->m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(instance_watcher->m_lock));
if (!send_to_leader) {
ceph_assert((!instance_id.empty()));
void send() {
dout(10) << "C_NotifyInstanceRequest: " << this << " " << __func__ << dendl;
- ceph_assert(instance_watcher->m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(instance_watcher->m_lock));
if (canceling) {
dout(10) << "C_NotifyInstanceRequest: " << this << " " << __func__
void cancel() {
dout(10) << "C_NotifyInstanceRequest: " << this << " " << __func__ << dendl;
- ceph_assert(instance_watcher->m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(instance_watcher->m_lock));
canceling = true;
instance_watcher->unsuspend_notify_request(this);
if (r == -ETIMEDOUT) {
derr << "C_NotifyInstanceRequest: " << this << " " << __func__
<< ": resending after timeout" << dendl;
- Mutex::Locker locker(instance_watcher->m_lock);
+ std::lock_guard locker{instance_watcher->m_lock};
send();
return;
} else {
if (r == -ESTALE && send_to_leader) {
derr << "C_NotifyInstanceRequest: " << this << " " << __func__
<< ": resending due to leader change" << dendl;
- Mutex::Locker locker(instance_watcher->m_lock);
+ std::lock_guard locker{instance_watcher->m_lock};
send();
return;
}
on_finish->complete(r);
{
- Mutex::Locker locker(instance_watcher->m_lock);
+ std::lock_guard locker{instance_watcher->m_lock};
auto result = instance_watcher->m_notify_ops.erase(
std::make_pair(instance_id, this));
ceph_assert(result > 0);
const std::string &instance_id)
: Watcher(io_ctx, work_queue, RBD_MIRROR_INSTANCE_PREFIX + instance_id),
m_instance_replayer(instance_replayer), m_instance_id(instance_id),
- m_lock(unique_lock_name("rbd::mirror::InstanceWatcher::m_lock", this)),
+ m_lock(ceph::make_mutex(
+ unique_lock_name("rbd::mirror::InstanceWatcher::m_lock", this))),
m_instance_lock(librbd::ManagedLock<I>::create(
m_ioctx, m_work_queue, m_oid, this, librbd::managed_lock::EXCLUSIVE, true,
m_cct->_conf.get_val<uint64_t>("rbd_blacklist_expire_seconds"))) {
void InstanceWatcher<I>::init(Context *on_finish) {
dout(10) << "instance_id=" << m_instance_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
void InstanceWatcher<I>::shut_down(Context *on_finish) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
void InstanceWatcher<I>::remove(Context *on_finish) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
dout(10) << "instance_id=" << instance_id << ", global_image_id="
<< global_image_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_finish == nullptr);
dout(10) << "instance_id=" << instance_id << ", global_image_id="
<< global_image_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_finish == nullptr);
<< "global_image_id=" << global_image_id << ", "
<< "peer_mirror_uuid=" << peer_mirror_uuid << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_finish == nullptr);
uint64_t request_id = ++m_request_seq;
Context *on_sync_start) {
dout(10) << "sync_id=" << sync_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_inflight_sync_reqs.count(sync_id) == 0);
bool InstanceWatcher<I>::cancel_sync_request(const std::string &sync_id) {
dout(10) << "sync_id=" << sync_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto it = m_inflight_sync_reqs.find(sync_id);
if (it == m_inflight_sync_reqs.end()) {
const std::string &sync_id) {
dout(10) << "sync_id=" << sync_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
uint64_t request_id = ++m_request_seq;
auto ctx = new FunctionContext(
[this, sync_id] (int r) {
dout(10) << "finish: sync_id=" << sync_id << ", r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r != -ESTALE && m_image_sync_throttler != nullptr) {
m_image_sync_throttler->finish_op(sync_id);
}
template <typename I>
void InstanceWatcher<I>::notify_sync_complete(const std::string &sync_id) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
notify_sync_complete(m_lock, sync_id);
}
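+// the unused ceph::mutex& parameter below is a compile-time reminder
+// that the caller must already hold m_lock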
template <typename I>
-void InstanceWatcher<I>::notify_sync_complete(const Mutex&,
+void InstanceWatcher<I>::notify_sync_complete(const ceph::mutex&,
const std::string &sync_id) {
dout(10) << "sync_id=" << sync_id << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
auto it = m_inflight_sync_reqs.find(sync_id);
ceph_assert(it != m_inflight_sync_reqs.end());
Context *on_start = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(sync_ctx->req != nullptr);
ceph_assert(sync_ctx->on_start != nullptr);
void InstanceWatcher<I>::print_sync_status(Formatter *f, stringstream *ss) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_image_sync_throttler != nullptr) {
m_image_sync_throttler->print_status(f, ss);
}
void InstanceWatcher<I>::handle_acquire_leader() {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_image_sync_throttler == nullptr);
m_image_sync_throttler = ImageSyncThrottler<I>::create(m_cct);
void InstanceWatcher<I>::handle_release_leader() {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_image_sync_throttler != nullptr);
const std::string &leader_instance_id) {
dout(10) << "leader_instance_id=" << leader_instance_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_leader_instance_id = leader_instance_id;
const std::string &instance_id) {
dout(10) << "instance_id=" << instance_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
for (auto op : m_notify_ops) {
if (op.first == instance_id && !op.second->send_to_leader) {
template <typename I>
void InstanceWatcher<I>::register_instance() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
dout(10) << dendl;
Context *on_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r == 0) {
create_instance_object();
void InstanceWatcher<I>::create_instance_object() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
librados::ObjectWriteOperation op;
op.create(true);
void InstanceWatcher<I>::handle_create_instance_object(int r) {
dout(10) << "r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r < 0) {
derr << "error creating " << m_oid << " object: " << cpp_strerror(r)
void InstanceWatcher<I>::register_watch() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void InstanceWatcher<I>::handle_register_watch(int r) {
dout(10) << "r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r < 0) {
derr << "error registering instance watcher for " << m_oid << " object: "
void InstanceWatcher<I>::acquire_lock() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
Context *on_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r < 0) {
void InstanceWatcher<I>::release_lock() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void InstanceWatcher<I>::handle_release_lock(int r) {
dout(10) << "r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r < 0) {
derr << "error releasing instance lock: " << cpp_strerror(r) << dendl;
void InstanceWatcher<I>::unregister_watch() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
<< cpp_strerror(r) << dendl;
}
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
remove_instance_object();
}
template <typename I>
void InstanceWatcher<I>::remove_instance_object() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
dout(10) << dendl;
<< dendl;
}
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
unregister_instance();
}
void InstanceWatcher<I>::unregister_instance() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
librados::ObjectWriteOperation op;
librbd::cls_client::mirror_instances_remove(&op, m_instance_id);
derr << "error unregistering instance: " << cpp_strerror(r) << dendl;
}
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
wait_for_notify_ops();
}
void InstanceWatcher<I>::wait_for_notify_ops() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
for (auto op : m_notify_ops) {
op.second->cancel();
Context *on_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_notify_ops.empty());
void InstanceWatcher<I>::get_instance_locker() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void InstanceWatcher<I>::handle_get_instance_locker(int r) {
dout(10) << "r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r < 0) {
if (r != -ENOENT) {
void InstanceWatcher<I>::break_instance_lock() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void InstanceWatcher<I>::handle_break_instance_lock(int r) {
dout(10) << "r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r < 0) {
if (r != -ENOENT) {
void InstanceWatcher<I>::suspend_notify_request(C_NotifyInstanceRequest *req) {
dout(10) << req << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
auto result = m_suspended_ops.insert(req).second;
ceph_assert(result);
C_NotifyInstanceRequest *req) {
dout(10) << req << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
auto result = m_suspended_ops.erase(req);
if (result == 0) {
void InstanceWatcher<I>::unsuspend_notify_requests() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
std::set<C_NotifyInstanceRequest *> suspended_ops;
std::swap(m_suspended_ops, suspended_ops);
dout(10) << "instance_id=" << instance_id << ", request_id=" << request_id
<< dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
Context *ctx = nullptr;
Request request(instance_id, request_id);
C_NotifyAck *on_notify_ack;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
Request request(instance_id, request_id);
auto it = m_requests.find(request);
ceph_assert(it != m_requests.end());
Context *on_finish) {
dout(10) << "instance_id=" << instance_id << ", sync_id=" << sync_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_image_sync_throttler == nullptr) {
dout(10) << "sync request for non-leader" << dendl;
Context *on_finish) {
dout(10) << "instance_id=" << instance_id << ", sync_id=" << sync_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto it = m_inflight_sync_reqs.find(sync_id);
if (it == m_inflight_sync_reqs.end()) {
InstanceReplayer<ImageCtxT> *m_instance_replayer;
std::string m_instance_id;
- mutable Mutex m_lock;
+ mutable ceph::mutex m_lock;
librbd::ManagedLock<ImageCtxT> *m_instance_lock;
Context *m_on_finish = nullptr;
int m_ret_val = 0;
bool unsuspend_notify_request(C_NotifyInstanceRequest *req);
void unsuspend_notify_requests();
- void notify_sync_complete(const Mutex& lock, const std::string &sync_id);
+ void notify_sync_complete(const ceph::mutex& lock,
+ const std::string &sync_id);
void handle_notify_sync_request(C_SyncRequest *sync_ctx, int r);
void handle_notify_sync_complete(C_SyncRequest *sync_ctx, int r);
instances::Listener& listener) :
m_threads(threads), m_ioctx(ioctx), m_instance_id(instance_id),
m_listener(listener), m_cct(reinterpret_cast<CephContext *>(ioctx.cct())),
- m_lock("rbd::mirror::Instances " + ioctx.get_pool_name()) {
+ m_lock(ceph::make_mutex("rbd::mirror::Instances " + ioctx.get_pool_name())) {
}
template <typename I>
void Instances<I>::init(Context *on_finish) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
get_instances();
void Instances<I>::shut_down(Context *on_finish) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
Context *ctx = new FunctionContext(
[this](int r) {
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
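+ // std::scoped_lock acquires both mutexes in one deadlock-avoiding
+ // operation, replacing the nested Mutex::Locker pair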
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
cancel_remove_task();
wait_for_ops();
});
void Instances<I>::unblock_listener() {
dout(5) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_listener_blocked);
m_listener_blocked = false;
void Instances<I>::acked(const InstanceIds& instance_ids) {
dout(10) << "instance_ids=" << instance_ids << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_on_finish != nullptr) {
dout(5) << "received on shut down, ignoring" << dendl;
return;
void Instances<I>::handle_acked(const InstanceIds& instance_ids) {
dout(5) << "instance_ids=" << instance_ids << dendl;
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
if (m_on_finish != nullptr) {
dout(5) << "handled on shut down, ignoring" << dendl;
return;
}
InstanceIds added_instance_ids;
- auto time = ceph_clock_now();
+ auto time = clock_t::now();
for (auto& instance_id : instance_ids) {
auto &instance = m_instances.insert(
std::make_pair(instance_id, Instance{})).first->second;
template <typename I>
void Instances<I>::notify_instances_added(const InstanceIds& instance_ids) {
- Mutex::Locker locker(m_lock);
+ std::unique_lock locker{m_lock};
InstanceIds added_instance_ids;
for (auto& instance_id : instance_ids) {
auto it = m_instances.find(instance_id);
}
dout(5) << "instance_ids=" << added_instance_ids << dendl;
- m_lock.Unlock();
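+ // drop the lock around the listener callback, then reacquire; with
+ // std::unique_lock the guard stays consistent even if the callback throws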
+ locker.unlock();
m_listener.handle_added(added_instance_ids);
- m_lock.Lock();
+ locker.lock();
for (auto& instance_id : added_instance_ids) {
auto it = m_instances.find(instance_id);
dout(5) << "instance_ids=" << instance_ids << dendl;
m_listener.handle_removed(instance_ids);
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
for (auto& instance_id : instance_ids) {
m_instances.erase(instance_id);
}
void Instances<I>::list(std::vector<std::string> *instance_ids) {
dout(20) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
for (auto it : m_instances) {
instance_ids->push_back(it.first);
void Instances<I>::get_instances() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_context_callback<
Instances, &Instances<I>::handle_get_instances>(this);
Context *on_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
std::swap(on_finish, m_on_finish);
}
void Instances<I>::wait_for_ops() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_async_context_callback(
m_threads->work_queue, create_context_callback<
Context *on_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
std::swap(on_finish, m_on_finish);
}
on_finish->complete(r);
}
template <typename I>
-void Instances<I>::remove_instances(const utime_t& time) {
- ceph_assert(m_lock.is_locked());
+void Instances<I>::remove_instances(
+ const Instances<I>::clock_t::time_point& time) {
+ ceph_assert(ceph_mutex_is_locked(m_lock));
InstanceIds instance_ids;
for (auto& instance_pair : m_instances) {
template <typename I>
void Instances<I>::handle_remove_instances(
int r, const InstanceIds& instance_ids) {
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
dout(10) << "r=" << r << ", instance_ids=" << instance_ids << dendl;
ceph_assert(r == 0);
new C_NotifyInstancesRemoved(this, instance_ids), 0);
// reschedule the timer for the next batch
- schedule_remove_task(ceph_clock_now());
+ schedule_remove_task(clock_t::now());
m_async_op_tracker.finish_op();
}
template <typename I>
void Instances<I>::cancel_remove_task() {
- ceph_assert(m_threads->timer_lock.is_locked());
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_timer_task == nullptr) {
return;
}
template <typename I>
-void Instances<I>::schedule_remove_task(const utime_t& time) {
+void Instances<I>::schedule_remove_task(
+ const Instances<I>::clock_t::time_point& time) {
cancel_remove_task();
if (m_on_finish != nullptr) {
dout(10) << "received on shut down, ignoring" << dendl;
m_cct->_conf.get_val<uint64_t>("rbd_mirror_leader_max_acquire_attempts_before_break"));
bool schedule = false;
- utime_t oldest_time = time;
+ auto oldest_time = time;
for (auto& instance : m_instances) {
if (instance.first == m_instance_id) {
continue;
// schedule a time to fire when the oldest instance should be removed
m_timer_task = new FunctionContext(
[this, oldest_time](int r) {
- ceph_assert(m_threads->timer_lock.is_locked());
- Mutex::Locker locker(m_lock);
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ std::lock_guard locker{m_lock};
m_timer_task = nullptr;
remove_instances(oldest_time);
});
- oldest_time += after;
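+ // make_timespan converts the configured seconds (double) into the
+ // chrono duration required for time_point arithmetic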
+ oldest_time += ceph::make_timespan(after);
m_threads->timer->add_event_at(oldest_time, m_timer_task);
}
#include "include/buffer_fwd.h"
#include "include/rados/librados_fwd.hpp"
#include "common/AsyncOpTracker.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "librbd/Watcher.h"
#include "tools/rbd_mirror/instances/Types.h"
INSTANCE_STATE_REMOVING
};
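+ // timestamps switch from utime_t to a std::chrono-based clock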
+ using clock_t = ceph::real_clock;
struct Instance {
- utime_t acked_time{};
+ clock_t::time_point acked_time{};
InstanceState state = INSTANCE_STATE_ADDING;
};
instances::Listener& m_listener;
CephContext *m_cct;
- Mutex m_lock;
+ ceph::mutex m_lock;
InstanceIds m_instance_ids;
std::map<std::string, Instance> m_instances;
Context *m_on_finish = nullptr;
void wait_for_ops();
void handle_wait_for_ops(int r);
- void remove_instances(const utime_t& time);
+ void remove_instances(const clock_t::time_point& time);
void handle_remove_instances(int r, const InstanceIds& instance_ids);
void cancel_remove_task();
- void schedule_remove_task(const utime_t& time);
+ void schedule_remove_task(const clock_t::time_point& time);
};
} // namespace mirror
// vim: ts=8 sw=2 smarttab
#include "LeaderWatcher.h"
+#include "common/Cond.h"
#include "common/Timer.h"
#include "common/debug.h"
#include "common/errno.h"
leader_watcher::Listener *listener)
: Watcher(io_ctx, threads->work_queue, RBD_MIRROR_LEADER),
m_threads(threads), m_listener(listener), m_instances_listener(this),
- m_lock("rbd::mirror::LeaderWatcher " + io_ctx.get_pool_name()),
+ m_lock(ceph::make_mutex("rbd::mirror::LeaderWatcher " +
+ io_ctx.get_pool_name())),
m_notifier_id(librados::Rados(io_ctx).get_instance_id()),
m_instance_id(stringify(m_notifier_id)),
m_leader_lock(new LeaderLock(m_ioctx, m_work_queue, m_oid, this, true,
void LeaderWatcher<I>::init(Context *on_finish) {
dout(10) << "notifier_id=" << m_notifier_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
void LeaderWatcher<I>::create_leader_object() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
librados::ObjectWriteOperation op;
op.create(false);
Context *on_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r == 0) {
register_watch();
void LeaderWatcher<I>::register_watch() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
Context *on_finish = nullptr;
if (r < 0) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
derr << "error registering leader watcher for " << m_oid << " object: "
<< cpp_strerror(r) << dendl;
ceph_assert(m_on_finish != nullptr);
std::swap(on_finish, m_on_finish);
} else {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
init_status_watcher();
return;
}
void LeaderWatcher<I>::shut_down(Context *on_finish) {
dout(10) << dendl;
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
ceph_assert(m_on_shut_down_finish == nullptr);
m_on_shut_down_finish = on_finish;
void LeaderWatcher<I>::shut_down_leader_lock() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void LeaderWatcher<I>::handle_shut_down_leader_lock(int r) {
dout(10) << "r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r < 0) {
derr << "error shutting down leader lock: " << cpp_strerror(r) << dendl;
void LeaderWatcher<I>::unregister_watch() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void LeaderWatcher<I>::wait_for_tasks() {
dout(10) << dendl;
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
schedule_timer_task("wait for tasks", 0, false,
&LeaderWatcher<I>::handle_wait_for_tasks, true);
}
void LeaderWatcher<I>::handle_wait_for_tasks() {
dout(10) << dendl;
- ceph_assert(m_threads->timer_lock.is_locked());
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_on_shut_down_finish != nullptr);
ceph_assert(!m_timer_op_tracker.empty());
Context *on_finish;
{
// ensure lock isn't held when completing shut down
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_shut_down_finish != nullptr);
on_finish = m_on_shut_down_finish;
}
template <typename I>
bool LeaderWatcher<I>::is_leader() const {
- Mutex::Locker locker(m_lock);
-
+ std::lock_guard locker{m_lock};
return is_leader(m_lock);
}
template <typename I>
-bool LeaderWatcher<I>::is_leader(Mutex &lock) const {
- ceph_assert(m_lock.is_locked());
+bool LeaderWatcher<I>::is_leader(ceph::mutex &lock) const {
+ ceph_assert(ceph_mutex_is_locked(m_lock));
bool leader = m_leader_lock->is_leader();
dout(10) << leader << dendl;
template <typename I>
bool LeaderWatcher<I>::is_releasing_leader() const {
- Mutex::Locker locker(m_lock);
-
+ std::lock_guard locker{m_lock};
return is_releasing_leader(m_lock);
}
template <typename I>
-bool LeaderWatcher<I>::is_releasing_leader(Mutex &lock) const {
- ceph_assert(m_lock.is_locked());
+bool LeaderWatcher<I>::is_releasing_leader(ceph::mutex &lock) const {
+ ceph_assert(ceph_mutex_is_locked(m_lock));
bool releasing = m_leader_lock->is_releasing_leader();
dout(10) << releasing << dendl;
bool LeaderWatcher<I>::get_leader_instance_id(std::string *instance_id) const {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (is_leader(m_lock) || is_releasing_leader(m_lock)) {
*instance_id = m_instance_id;
void LeaderWatcher<I>::release_leader() {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (!is_leader(m_lock)) {
return;
}
void LeaderWatcher<I>::list_instances(std::vector<std::string> *instance_ids) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
instance_ids->clear();
if (m_instances != nullptr) {
template <typename I>
void LeaderWatcher<I>::cancel_timer_task() {
- ceph_assert(m_threads->timer_lock.is_locked());
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_timer_task == nullptr) {
return;
int delay_factor, bool leader,
TimerCallback timer_callback,
bool shutting_down) {
- ceph_assert(m_threads->timer_lock.is_locked());
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ ceph_assert(ceph_mutex_is_locked(m_lock));
if (!shutting_down && m_on_shut_down_finish != nullptr) {
return;
m_timer_task = new FunctionContext(
[this, leader, timer_callback](int r) {
- ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
m_timer_task = nullptr;
if (m_timer_op_tracker.empty()) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
execute_timer_task(leader, timer_callback);
return;
}
TimerCallback timer_callback) {
dout(10) << dendl;
- ceph_assert(m_threads->timer_lock.is_locked());
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_timer_op_tracker.empty());
if (is_leader(m_lock) != leader) {
return;
}
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
m_ret_val = 0;
void LeaderWatcher<I>::handle_pre_release_leader_lock(Context *on_finish) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
m_ret_val = 0;
return;
}
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_finish == nullptr);
m_on_finish = on_finish;
void LeaderWatcher<I>::break_leader_lock() {
dout(10) << dendl;
- ceph_assert(m_threads->timer_lock.is_locked());
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_timer_op_tracker.empty());
if (m_locker.cookie.empty()) {
void LeaderWatcher<I>::handle_break_leader_lock(int r) {
dout(10) << "r=" << r << dendl;
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
ceph_assert(!m_timer_op_tracker.empty());
if (m_leader_lock->is_shutdown()) {
uint32_t delay_factor) {
dout(10) << dendl;
- ceph_assert(m_threads->timer_lock.is_locked());
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ ceph_assert(ceph_mutex_is_locked(m_lock));
if (reset_leader) {
m_locker = {};
void LeaderWatcher<I>::get_locker() {
dout(10) << dendl;
- ceph_assert(m_threads->timer_lock.is_locked());
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_timer_op_tracker.empty());
C_GetLocker *get_locker_ctx = new C_GetLocker(this);
librbd::managed_lock::Locker& locker) {
dout(10) << "r=" << r << dendl;
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker mutex_locker(m_lock);
+ std::scoped_lock l{m_threads->timer_lock, m_lock};
ceph_assert(!m_timer_op_tracker.empty());
if (m_leader_lock->is_shutdown()) {
if (get_leader_instance_id(&instance_id)) {
m_listener->update_leader_handler(instance_id);
}
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
m_timer_op_tracker.finish_op();
});
m_work_queue->queue(ctx, 0);
void LeaderWatcher<I>::schedule_acquire_leader_lock(uint32_t delay_factor) {
dout(10) << dendl;
- ceph_assert(m_threads->timer_lock.is_locked());
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ ceph_assert(ceph_mutex_is_locked(m_lock));
schedule_timer_task("acquire leader lock",
delay_factor *
template <typename I>
void LeaderWatcher<I>::acquire_leader_lock() {
- ceph_assert(m_threads->timer_lock.is_locked());
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_timer_op_tracker.empty());
++m_acquire_attempts;
void LeaderWatcher<I>::handle_acquire_leader_lock(int r) {
dout(10) << "r=" << r << dendl;
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
ceph_assert(!m_timer_op_tracker.empty());
if (m_leader_lock->is_shutdown()) {
void LeaderWatcher<I>::release_leader_lock() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void LeaderWatcher<I>::handle_release_leader_lock(int r) {
dout(10) << "r=" << r << dendl;
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
if (r < 0) {
derr << "error releasing lock: " << cpp_strerror(r) << dendl;
void LeaderWatcher<I>::init_status_watcher() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_status_watcher == nullptr);
m_status_watcher = MirrorStatusWatcher<I>::create(m_ioctx, m_work_queue);
Context *on_finish = nullptr;
{
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
if (r < 0) {
derr << "error initializing mirror status watcher: " << cpp_strerror(r)
void LeaderWatcher<I>::shut_down_status_watcher() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_status_watcher != nullptr);
Context *ctx = create_async_context_callback(
void LeaderWatcher<I>::handle_shut_down_status_watcher(int r) {
dout(10) << "r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_status_watcher->destroy();
m_status_watcher = nullptr;
void LeaderWatcher<I>::init_instances() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_instances == nullptr);
m_instances = Instances<I>::create(m_threads, m_ioctx, m_instance_id,
Context *on_finish = nullptr;
if (r < 0) {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
derr << "error initializing instances: " << cpp_strerror(r) << dendl;
m_instances->destroy();
m_instances = nullptr;
ceph_assert(m_on_finish != nullptr);
std::swap(m_on_finish, on_finish);
} else {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
notify_listener();
return;
}
void LeaderWatcher<I>::shut_down_instances() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_instances != nullptr);
Context *ctx = create_async_context_callback(
Context *on_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_instances->destroy();
m_instances = nullptr;
void LeaderWatcher<I>::notify_listener() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_async_context_callback(
m_work_queue, create_context_callback<
void LeaderWatcher<I>::handle_notify_listener(int r) {
dout(10) << "r=" << r << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r < 0) {
derr << "error notifying listener: " << cpp_strerror(r) << dendl;
void LeaderWatcher<I>::notify_lock_acquired() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_context_callback<
LeaderWatcher<I>, &LeaderWatcher<I>::handle_notify_lock_acquired>(this);
Context *on_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r < 0 && r != -ETIMEDOUT) {
derr << "error notifying leader lock acquired: " << cpp_strerror(r)
<< dendl;
void LeaderWatcher<I>::notify_lock_released() {
dout(10) << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
Context *ctx = create_context_callback<
LeaderWatcher<I>, &LeaderWatcher<I>::handle_notify_lock_released>(this);
Context *on_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (r < 0 && r != -ETIMEDOUT) {
derr << "error notifying leader lock released: " << cpp_strerror(r)
<< dendl;
void LeaderWatcher<I>::notify_heartbeat() {
dout(10) << dendl;
- ceph_assert(m_threads->timer_lock.is_locked());
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
+ ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_timer_op_tracker.empty());
if (!is_leader(m_lock)) {
void LeaderWatcher<I>::handle_notify_heartbeat(int r) {
dout(10) << "r=" << r << dendl;
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
ceph_assert(!m_timer_op_tracker.empty());
m_timer_op_tracker.finish_op();
dout(10) << dendl;
{
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
if (is_leader(m_lock)) {
dout(5) << "got another leader heartbeat, ignoring" << dendl;
} else {
dout(10) << dendl;
{
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
if (is_leader(m_lock)) {
dout(5) << "got another leader lock_acquired, ignoring" << dendl;
} else {
dout(10) << dendl;
{
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
if (is_leader(m_lock)) {
dout(5) << "got another leader lock_released, ignoring" << dendl;
} else {
}
bool is_leader() const {
- Mutex::Locker locker(Parent::m_lock);
+ std::lock_guard locker{Parent::m_lock};
return Parent::is_state_post_acquiring() || Parent::is_state_locked();
}
bool is_releasing_leader() const {
- Mutex::Locker locker(Parent::m_lock);
+ std::lock_guard locker{Parent::m_lock};
return Parent::is_state_pre_releasing();
}
void post_acquire_lock_handler(int r, Context *on_finish) {
if (r == 0) {
// lock is owned at this point
- Mutex::Locker locker(Parent::m_lock);
+ std::lock_guard locker{Parent::m_lock};
Parent::set_state_post_acquiring();
}
watcher->handle_post_acquire_leader_lock(r, on_finish);
leader_watcher::Listener *m_listener;
InstancesListener m_instances_listener;
- mutable Mutex m_lock;
+ mutable ceph::mutex m_lock;
uint64_t m_notifier_id;
std::string m_instance_id;
LeaderLock *m_leader_lock;
librbd::watcher::NotifyResponse m_heartbeat_response;
- bool is_leader(Mutex &m_lock) const;
- bool is_releasing_leader(Mutex &m_lock) const;
+ bool is_leader(ceph::mutex &m_lock) const;
+ bool is_releasing_leader(ceph::mutex &m_lock) const;
void cancel_timer_task();
void schedule_timer_task(const std::string &name,
class CacheManagerHandler : public journal::CacheManagerHandler {
public:
CacheManagerHandler(CephContext *cct)
- : m_cct(cct), m_lock("rbd::mirror::CacheManagerHandler") {
+ : m_cct(cct) {
if (!m_cct->_conf.get_val<bool>("rbd_mirror_memory_autotune")) {
return;
}
~CacheManagerHandler() {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_caches.empty());
}
dout(20) << cache_name << " min_size=" << min_size << " max_size="
<< max_size << " handler=" << handler << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto p = m_caches.insert(
{cache_name, {cache_name, min_size, max_size, handler}});
dout(20) << cache_name << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto it = m_caches.find(cache_name);
ceph_assert(it != m_caches.end());
return;
}
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
// Before we trim, check and see if it's time to rebalance/resize.
auto autotune_interval = m_cct->_conf.get_val<double>(
CephContext *m_cct;
- mutable Mutex m_lock;
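+ // the lock name moves from the constructor initializer list to an
+ // in-class default member initializer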
+ mutable ceph::mutex m_lock =
+ ceph::make_mutex("rbd::mirror::CacheManagerHandler");
std::unique_ptr<PriorityCache::Manager> m_cache_manager;
std::map<std::string, Cache> m_caches;
Mirror::Mirror(CephContext *cct, const std::vector<const char*> &args) :
m_cct(cct),
m_args(args),
- m_lock("rbd::mirror::Mirror"),
m_local(new librados::Rados()),
m_cache_manager_handler(new CacheManagerHandler(cct)),
m_asok_hook(new MirrorAdminSocketHook(cct, this))
{
m_stopping = true;
{
- Mutex::Locker l(m_lock);
- m_cond.Signal();
+ std::lock_guard l{m_lock};
+ m_cond.notify_all();
}
}
next_refresh_pools += m_cct->_conf.get_val<uint64_t>(
"rbd_mirror_pool_replayers_refresh_interval");
}
- Mutex::Locker l(m_lock);
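+ // unique_lock (not lock_guard): the wait_for() below must be able
+ // to unlock and relock the mutex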
+ std::unique_lock l{m_lock};
if (!m_manual_stop) {
if (refresh_pools) {
update_pool_replayers(m_local_cluster_watcher->get_pool_peers());
}
m_cache_manager_handler->run_cache_manager();
}
- m_cond.WaitInterval(m_lock, {1, 0});
+ m_cond.wait_for(l, 1s);
}
// stop all pool replayers in parallel
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
for (auto &pool_replayer : m_pool_replayers) {
pool_replayer.second->stop(false);
}
{
dout(20) << "enter" << dendl;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (m_stopping) {
return;
void Mirror::start()
{
dout(20) << "enter" << dendl;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (m_stopping) {
return;
void Mirror::stop()
{
dout(20) << "enter" << dendl;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (m_stopping) {
return;
void Mirror::restart()
{
dout(20) << "enter" << dendl;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (m_stopping) {
return;
void Mirror::flush()
{
dout(20) << "enter" << dendl;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (m_stopping || m_manual_stop) {
return;
void Mirror::release_leader()
{
dout(20) << "enter" << dendl;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (m_stopping) {
return;
void Mirror::update_pool_replayers(const PoolPeers &pool_peers)
{
dout(20) << "enter" << dendl;
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
// remove stale pool replayers before creating new pool replayers
for (auto it = m_pool_replayers.begin(); it != m_pool_replayers.end();) {
#define CEPH_RBD_MIRROR_H
#include "common/ceph_context.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "include/rados/librados.hpp"
#include "include/utime.h"
#include "ClusterWatcher.h"
CephContext *m_cct;
std::vector<const char*> m_args;
Threads<librbd::ImageCtx> *m_threads = nullptr;
- Mutex m_lock;
- Cond m_cond;
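+ // ceph::condition_variable aliases std::condition_variable unless
+ // CEPH_DEBUG_MUTEX instrumentation is enabled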
+ ceph::mutex m_lock = ceph::make_mutex("rbd::mirror::Mirror");
+ ceph::condition_variable m_cond;
RadosRef m_local;
std::unique_ptr<ServiceDaemon<librbd::ImageCtx>> m_service_daemon;
m_local_pool_id(local_pool_id),
m_peer(peer),
m_args(args),
- m_lock(stringify("rbd::mirror::PoolReplayer ") + stringify(peer)),
+ m_lock(ceph::make_mutex(stringify("rbd::mirror::PoolReplayer ") + stringify(peer))),
m_local_pool_watcher_listener(this, true),
m_remote_pool_watcher_listener(this, false),
m_image_map_listener(this),
template <typename I>
bool PoolReplayer<I>::is_blacklisted() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return m_blacklisted;
}
template <typename I>
bool PoolReplayer<I>::is_leader() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return m_leader_watcher && m_leader_watcher->is_leader();
}
void PoolReplayer<I>::shut_down() {
m_stopping = true;
{
- Mutex::Locker l(m_lock);
- m_cond.Signal();
+ std::lock_guard l{m_lock};
+ m_cond.notify_all();
}
if (m_pool_replayer_thread.is_started()) {
m_pool_replayer_thread.join();
m_asok_hook_name, this);
}
- Mutex::Locker locker(m_lock);
+ std::unique_lock locker{m_lock};
if ((m_local_pool_watcher && m_local_pool_watcher->is_blacklisted()) ||
(m_remote_pool_watcher && m_remote_pool_watcher->is_blacklisted())) {
m_blacklisted = true;
}
if (!m_stopping) {
- m_cond.WaitInterval(m_lock, utime_t(1, 0));
+ m_cond.wait_for(locker, 1s);
}
}
return;
}
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
f->open_object_section("pool_replayer_status");
f->dump_string("pool", m_local_io_ctx.get_pool_name());
{
dout(20) << "enter" << dendl;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (m_stopping) {
return;
{
dout(20) << "enter: manual=" << manual << dendl;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (!manual) {
m_stopping = true;
- m_cond.Signal();
+ m_cond.notify_all();
return;
} else if (m_stopping) {
return;
{
dout(20) << "enter" << dendl;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (m_stopping) {
return;
{
dout(20) << "enter" << dendl;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (m_stopping || m_manual_stop) {
return;
{
dout(20) << "enter" << dendl;
- Mutex::Locker l(m_lock);
+ std::lock_guard l{m_lock};
if (m_stopping || !m_leader_watcher) {
return;
dout(10) << "mirror_uuid=" << mirror_uuid << ", "
<< "added_count=" << added_image_ids.size() << ", "
<< "removed_count=" << removed_image_ids.size() << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (!m_leader_watcher->is_leader()) {
return;
}
void PoolReplayer<I>::init_image_map(Context *on_finish) {
dout(5) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(!m_image_map);
m_image_map.reset(ImageMap<I>::create(m_local_io_ctx, m_threads,
m_instance_watcher->get_instance_id(),
void PoolReplayer<I>::init_local_pool_watcher(Context *on_finish) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(!m_local_pool_watcher);
m_local_pool_watcher.reset(PoolWatcher<I>::create(
m_threads, m_local_io_ctx, m_local_pool_watcher_listener));
void PoolReplayer<I>::init_remote_pool_watcher(Context *on_finish) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(!m_remote_pool_watcher);
m_remote_pool_watcher.reset(PoolWatcher<I>::create(
m_threads, m_remote_io_ctx, m_remote_pool_watcher_listener));
void PoolReplayer<I>::init_image_deleter(Context *on_finish) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(!m_image_deleter);
on_finish = new FunctionContext([this, on_finish](int r) {
on_finish->complete(0);
- Mutex::Locker locker(m_lock);
- m_cond.Signal();
+ std::lock_guard locker{m_lock};
+ m_cond.notify_all();
}
template <typename I>
void PoolReplayer<I>::shut_down_image_deleter(Context* on_finish) {
dout(10) << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_image_deleter) {
Context *ctx = new FunctionContext([this, on_finish](int r) {
handle_shut_down_image_deleter(r, on_finish);
dout(10) << "r=" << r << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_image_deleter);
m_image_deleter.reset();
}
dout(10) << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_local_pool_watcher) {
Context *ctx = new FunctionContext([this, on_finish](int r) {
handle_shut_down_pool_watchers(r, on_finish);
dout(10) << "r=" << r << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_local_pool_watcher);
m_local_pool_watcher.reset();
void PoolReplayer<I>::wait_for_update_ops(Context *on_finish) {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
Context *ctx = new FunctionContext([this, on_finish](int r) {
handle_wait_for_update_ops(r, on_finish);
dout(5) << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_image_map) {
on_finish = new FunctionContext([this, on_finish](int r) {
handle_shut_down_image_map(r, on_finish);
derr << "failed to shut down image map: " << cpp_strerror(r) << dendl;
}
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_image_map);
m_image_map.reset();
template <typename I>
void PoolReplayer<I>::handle_instances_added(const InstanceIds &instance_ids) {
dout(5) << "instance_ids=" << instance_ids << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (!m_leader_watcher->is_leader()) {
return;
}
void PoolReplayer<I>::handle_instances_removed(
const InstanceIds &instance_ids) {
dout(5) << "instance_ids=" << instance_ids << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (!m_leader_watcher->is_leader()) {
return;
}
#define CEPH_RBD_MIRROR_POOL_REPLAYER_H
#include "common/AsyncOpTracker.h"
-#include "common/Cond.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "common/WorkQueue.h"
#include "include/rados/librados.hpp"
PeerSpec m_peer;
std::vector<const char*> m_args;
- mutable Mutex m_lock;
- Cond m_cond;
+ mutable ceph::mutex m_lock;
+ ceph::condition_variable m_cond;
std::atomic<bool> m_stopping = { false };
bool m_manual_stop = false;
bool m_blacklisted = false;
PoolWatcher<I>::PoolWatcher(Threads<I> *threads, librados::IoCtx &remote_io_ctx,
pool_watcher::Listener &listener)
: m_threads(threads), m_remote_io_ctx(remote_io_ctx), m_listener(listener),
- m_lock(librbd::util::unique_lock_name("rbd::mirror::PoolWatcher", this)) {
+ m_lock(ceph::make_mutex(librbd::util::unique_lock_name(
+ "rbd::mirror::PoolWatcher", this))) {
m_mirroring_watcher = new MirroringWatcher(m_remote_io_ctx,
m_threads->work_queue, this);
}
template <typename I>
bool PoolWatcher<I>::is_blacklisted() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return m_blacklisted;
}
dout(5) << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_on_init_finish = on_finish;
ceph_assert(!m_refresh_in_progress);
dout(5) << dendl;
{
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
ceph_assert(!m_shutting_down);
m_shutting_down = true;
template <typename I>
void PoolWatcher<I>::register_watcher() {
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_image_ids_invalid);
ceph_assert(m_refresh_in_progress);
}
dout(5) << "r=" << r << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_image_ids_invalid);
ceph_assert(m_refresh_in_progress);
if (r < 0) {
} else if (r == -EBLACKLISTED) {
dout(0) << "detected client is blacklisted" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_blacklisted = true;
std::swap(on_init_finish, m_on_init_finish);
} else if (r == -ENOENT) {
dout(5) << "mirroring directory does not exist" << dendl;
schedule_refresh_images(30);
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
std::swap(on_init_finish, m_on_init_finish);
} else {
derr << "unexpected error registering mirroring directory watch: "
dout(5) << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_image_ids_invalid);
ceph_assert(m_refresh_in_progress);
bool retry_refresh = false;
Context *on_init_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_image_ids_invalid);
ceph_assert(m_refresh_in_progress);
bool retry_refresh = false;
Context *on_init_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_image_ids_invalid);
ceph_assert(m_refresh_in_progress);
m_refresh_in_progress = false;
template <typename I>
void PoolWatcher<I>::schedule_refresh_images(double interval) {
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
if (m_shutting_down || m_refresh_in_progress || m_timer_ctx != nullptr) {
if (m_refresh_in_progress && !m_deferred_refresh) {
dout(5) << "deferring refresh until in-flight refresh completes" << dendl;
if (r == -EBLACKLISTED) {
dout(0) << "detected client is blacklisted" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_blacklisted = true;
return;
} else if (r == -ENOENT) {
<< "global_image_id=" << global_image_id << ", "
<< "enabled=" << enabled << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ImageId image_id(global_image_id, remote_image_id);
m_pending_added_image_ids.erase(image_id);
m_pending_removed_image_ids.erase(image_id);
template <typename I>
void PoolWatcher<I>::process_refresh_images() {
- ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
ceph_assert(m_timer_ctx != nullptr);
m_timer_ctx = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(!m_refresh_in_progress);
m_refresh_in_progress = true;
m_deferred_refresh = false;
template <typename I>
void PoolWatcher<I>::schedule_listener() {
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
m_pending_updates = true;
if (m_shutting_down || m_image_ids_invalid || m_notify_listener_in_progress) {
return;
ImageIds added_image_ids;
ImageIds removed_image_ids;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_notify_listener_in_progress);
// if the mirror uuid is updated, treat it as the removal of all
}
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_notify_listener_in_progress);
// if the watch failed while we didn't own the lock, we are going
std::move(removed_image_ids));
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_notify_listener_in_progress = false;
if (m_pending_updates) {
schedule_listener();
#include "common/AsyncOpTracker.h"
#include "common/ceph_context.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "include/rados/librados.hpp"
#include "tools/rbd_mirror/Types.h"
#include <boost/functional/hash.hpp>
void shut_down(Context *on_finish);
inline uint64_t get_image_count() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return m_image_ids.size();
}
ImageIds m_refresh_image_ids;
bufferlist m_out_bl;
- mutable Mutex m_lock;
+ mutable ceph::mutex m_lock;
Context *m_on_init_finish = nullptr;
template <typename I>
ServiceDaemon<I>::ServiceDaemon(CephContext *cct, RadosRef rados,
Threads<I>* threads)
- : m_cct(cct), m_rados(rados), m_threads(threads),
- m_lock("rbd::mirror::ServiceDaemon") {
+ : m_cct(cct), m_rados(rados), m_threads(threads) {
dout(20) << dendl;
}
template <typename I>
ServiceDaemon<I>::~ServiceDaemon() {
dout(20) << dendl;
- Mutex::Locker timer_locker(m_threads->timer_lock);
+ std::lock_guard timer_locker{m_threads->timer_lock};
if (m_timer_ctx != nullptr) {
m_threads->timer->cancel_event(m_timer_ctx);
update_status();
dout(20) << "pool_id=" << pool_id << ", pool_name=" << pool_name << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_pools.insert({pool_id, {pool_name}});
}
schedule_update_status();
void ServiceDaemon<I>::remove_pool(int64_t pool_id) {
dout(20) << "pool_id=" << pool_id << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_pools.erase(pool_id);
}
schedule_update_status();
<< "text=" << text << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto pool_it = m_pools.find(pool_id);
if (pool_it == m_pools.end()) {
return CALLOUT_ID_NONE;
<< "callout_id=" << callout_id << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto pool_it = m_pools.find(pool_id);
if (pool_it == m_pools.end()) {
return;
<< "value=" << value << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto pool_it = m_pools.find(pool_id);
if (pool_it == m_pools.end()) {
return;
<< "key=" << key << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
auto pool_it = m_pools.find(pool_id);
if (pool_it == m_pools.end()) {
return;
template <typename I>
void ServiceDaemon<I>::schedule_update_status() {
- Mutex::Locker timer_locker(m_threads->timer_lock);
+ std::lock_guard timer_locker{m_threads->timer_lock};
if (m_timer_ctx != nullptr) {
return;
}
template <typename I>
void ServiceDaemon<I>::update_status() {
dout(20) << dendl;
- ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
ceph::JSONFormatter f;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
f.open_object_section("pools");
for (auto& pool_pair : m_pools) {
f.open_object_section(stringify(pool_pair.first).c_str());
#ifndef CEPH_RBD_MIRROR_SERVICE_DAEMON_H
#define CEPH_RBD_MIRROR_SERVICE_DAEMON_H
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "tools/rbd_mirror/Types.h"
#include "tools/rbd_mirror/service_daemon/Types.h"
#include <map>
RadosRef m_rados;
Threads<ImageCtxT>* m_threads;
- Mutex m_lock;
+ ceph::mutex m_lock = ceph::make_mutex("rbd::mirror::ServiceDaemon");
Pools m_pools;
uint64_t m_callout_id = service_daemon::CALLOUT_ID_NONE;
namespace mirror {
template <typename I>
-Threads<I>::Threads(CephContext *cct) : timer_lock("Threads::timer_lock") {
+Threads<I>::Threads(CephContext *cct) {
thread_pool = new ThreadPool(cct, "Journaler::thread_pool", "tp_journal",
cct->_conf.get_val<uint64_t>("rbd_op_threads"),
"rbd_op_threads");
template <typename I>
Threads<I>::~Threads() {
{
- Mutex::Locker timer_locker(timer_lock);
+ std::lock_guard timer_locker{timer_lock};
timer->shutdown();
}
delete timer;
#ifndef CEPH_RBD_MIRROR_THREADS_H
#define CEPH_RBD_MIRROR_THREADS_H
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
class CephContext;
class ContextWQ;
ContextWQ *work_queue = nullptr;
SafeTimer *timer = nullptr;
- Mutex timer_lock;
+ ceph::mutex timer_lock =
+ ceph::make_mutex("Threads::timer_lock");
explicit Threads(CephContext *cct);
Threads(const Threads&) = delete;
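
The ServiceDaemon and Threads hunks above share one pattern: the lock's debug name moves out of every constructor's init list and into an in-class initializer via ceph::make_mutex. A sketch of the shape, with make_named_mutex standing in for the real factory:

#include <mutex>
#include <string>

// make_named_mutex stands in for ceph::make_mutex; the real factory
// attaches the name for debug-build lock diagnostics, while here it is
// simply discarded.
inline std::mutex make_named_mutex(const std::string& /*name*/) {
  return std::mutex{};  // legal by C++17 guaranteed copy elision
}

struct ThreadsLike {
  // was: Mutex timer_lock; plus timer_lock("Threads::timer_lock") in
  // every constructor's init list
  std::mutex timer_lock = make_named_mutex("Threads::timer_lock");

  ThreadsLike() = default;  // constructors no longer mention the lock
};

C++17's guaranteed copy elision is what allows a non-movable std::mutex to be returned by value here, which is why the in-class form only became practical with the newer standard.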
m_image_ctx = I::create("", m_image_id, nullptr, m_io_ctx, false);
{
- RWLock::WLocker image_locker(m_image_ctx->image_lock);
+ std::unique_lock image_locker{m_image_ctx->image_lock};
m_image_ctx->set_journal_policy(new JournalPolicy());
}
void SnapshotPurgeRequest<I>::acquire_lock() {
dout(10) << dendl;
- m_image_ctx->owner_lock.get_read();
+ m_image_ctx->owner_lock.lock_shared();
if (m_image_ctx->exclusive_lock == nullptr) {
- m_image_ctx->owner_lock.put_read();
+ m_image_ctx->owner_lock.unlock_shared();
derr << "exclusive lock not enabled" << dendl;
m_ret_val = -EINVAL;
m_image_ctx->exclusive_lock->acquire_lock(create_context_callback<
SnapshotPurgeRequest<I>, &SnapshotPurgeRequest<I>::handle_acquire_lock>(
this));
- m_image_ctx->owner_lock.put_read();
+ m_image_ctx->owner_lock.unlock_shared();
}
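
The bare (non-RAII) call sites above translate one-to-one: RWLock::get_read()/put_read() become the standard shared-mutex operations lock_shared()/unlock_shared(). A minimal sketch on a plain std::shared_mutex:

#include <shared_mutex>

void acquire_lock_like(std::shared_mutex& owner_lock, bool lock_supported) {
  owner_lock.lock_shared();        // was: owner_lock.get_read()
  if (!lock_supported) {
    owner_lock.unlock_shared();    // was: owner_lock.put_read()
    return;                        // error path drops the lock first
  }
  // ... kick off the async acquire while the shared lock is held ...
  owner_lock.unlock_shared();      // was: owner_lock.put_read()
}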
template <typename I>
}
{
- RWLock::RLocker image_locker(m_image_ctx->image_lock);
+ std::shared_lock image_locker{m_image_ctx->image_lock};
m_snaps = m_image_ctx->snaps;
}
snap_unprotect();
}
librados::snap_t snap_id = m_snaps.back();
- m_image_ctx->image_lock.get_read();
+ m_image_ctx->image_lock.lock_shared();
int r = m_image_ctx->get_snap_namespace(snap_id, &m_snap_namespace);
if (r < 0) {
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
derr << "failed to get snap namespace: " << cpp_strerror(r) << dendl;
m_ret_val = r;
r = m_image_ctx->get_snap_name(snap_id, &m_snap_name);
if (r < 0) {
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
derr << "failed to get snap name: " << cpp_strerror(r) << dendl;
m_ret_val = r;
bool is_protected;
r = m_image_ctx->is_snap_protected(snap_id, &is_protected);
if (r < 0) {
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
derr << "failed to get snap protection status: " << cpp_strerror(r)
<< dendl;
close_image();
return;
}
- m_image_ctx->image_lock.put_read();
+ m_image_ctx->image_lock.unlock_shared();
if (!is_protected) {
snap_remove();
handle_snap_unprotect(r);
finish_op_ctx->complete(0);
});
- RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_image_ctx->owner_lock};
m_image_ctx->operations->execute_snap_unprotect(
m_snap_namespace, m_snap_name.c_str(), ctx);
}
{
// avoid the need to refresh to delete the newly unprotected snapshot
- RWLock::RLocker image_locker(m_image_ctx->image_lock);
+ std::shared_lock image_locker{m_image_ctx->image_lock};
librados::snap_t snap_id = m_snaps.back();
auto snap_info_it = m_image_ctx->snap_info.find(snap_id);
if (snap_info_it != m_image_ctx->snap_info.end()) {
handle_snap_remove(r);
finish_op_ctx->complete(0);
});
- RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_image_ctx->owner_lock};
m_image_ctx->operations->execute_snap_remove(
m_snap_namespace, m_snap_name.c_str(), ctx);
}
template <typename I>
Context *SnapshotPurgeRequest<I>::start_lock_op(int* r) {
- RWLock::RLocker owner_locker(m_image_ctx->owner_lock);
+ std::shared_lock owner_locker{m_image_ctx->owner_lock};
return m_image_ctx->exclusive_lock->start_op(r);
}
{
// don't attempt to open the journal
- RWLock::WLocker image_locker(m_image_ctx->image_lock);
+ std::unique_lock image_locker{m_image_ctx->image_lock};
m_image_ctx->set_journal_policy(new JournalPolicy());
}
template <typename I>
void TrashMoveRequest<I>::acquire_lock() {
- m_image_ctx->owner_lock.get_read();
+ m_image_ctx->owner_lock.lock_shared();
if (m_image_ctx->exclusive_lock == nullptr) {
derr << "exclusive lock feature not enabled" << dendl;
- m_image_ctx->owner_lock.put_read();
+ m_image_ctx->owner_lock.unlock_shared();
m_ret_val = -EINVAL;
close_image();
return;
TrashMoveRequest<I>, &TrashMoveRequest<I>::handle_acquire_lock>(this);
m_image_ctx->exclusive_lock->block_requests(0);
m_image_ctx->exclusive_lock->acquire_lock(ctx);
- m_image_ctx->owner_lock.put_read();
+ m_image_ctx->owner_lock.unlock_shared();
}
template <typename I>
TrashListener& trash_listener)
: librbd::TrashWatcher<I>(io_ctx, threads->work_queue),
m_io_ctx(io_ctx), m_threads(threads), m_trash_listener(trash_listener),
- m_lock(librbd::util::unique_lock_name(
- "rbd::mirror::image_deleter::TrashWatcher", this)) {
+ m_lock(ceph::make_mutex(librbd::util::unique_lock_name(
+ "rbd::mirror::image_deleter::TrashWatcher", this))) {
}
template <typename I>
dout(5) << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_on_init_finish = on_finish;
ceph_assert(!m_trash_list_in_progress);
dout(5) << dendl;
{
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
ceph_assert(!m_shutting_down);
m_shutting_down = true;
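
Where two Mutex::Lockers nested, a single std::scoped_lock now acquires both mutexes using the std::lock deadlock-avoidance algorithm and releases them in reverse order at scope exit. A self-contained sketch:

#include <mutex>

struct TrashWatcherLike {
  std::mutex timer_lock;  // stand-in for m_threads->timer_lock
  std::mutex m_lock;

  void shut_down() {
    // was: Mutex::Locker timer_locker(timer_lock);
    //      Mutex::Locker locker(m_lock);
    std::scoped_lock locker{timer_lock, m_lock};
    // both mutexes held here; released in reverse order at scope exit
  }
};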
const cls::rbd::TrashImageSpec& spec) {
dout(10) << "image_id=" << image_id << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
add_image(image_id, spec);
}
void TrashWatcher<I>::create_trash() {
dout(20) << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_trash_list_in_progress);
}
void TrashWatcher<I>::handle_create_trash(int r) {
dout(20) << "r=" << r << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_trash_list_in_progress);
}
dout(0) << "detected pool no longer exists" << dendl;
}
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
std::swap(on_init_finish, m_on_init_finish);
m_trash_list_in_progress = false;
} else if (r < 0 && r != -EEXIST) {
derr << "failed to create trash object: " << cpp_strerror(r) << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_trash_list_in_progress = false;
}
template <typename I>
void TrashWatcher<I>::register_watcher() {
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_trash_list_in_progress);
}
dout(5) << "r=" << r << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_trash_list_in_progress);
if (r < 0) {
m_trash_list_in_progress = false;
} else if (r == -EBLACKLISTED) {
dout(0) << "detected client is blacklisted" << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
std::swap(on_init_finish, m_on_init_finish);
} else {
derr << "unexpected error registering trash directory watch: "
dout(5) << "last_image_id=" << m_last_image_id << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_trash_list_in_progress);
}
Context *on_init_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_trash_list_in_progress);
if (r >= 0) {
for (auto& image : images) {
template <typename I>
void TrashWatcher<I>::schedule_trash_list(double interval) {
- Mutex::Locker timer_locker(m_threads->timer_lock);
- Mutex::Locker locker(m_lock);
+ std::scoped_lock locker{m_threads->timer_lock, m_lock};
if (m_shutting_down || m_trash_list_in_progress || m_timer_ctx != nullptr) {
if (m_trash_list_in_progress && !m_deferred_trash_list) {
dout(5) << "deferring refresh until in-flight refresh completes" << dendl;
void TrashWatcher<I>::process_trash_list() {
dout(5) << dendl;
- ceph_assert(m_threads->timer_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock));
ceph_assert(m_timer_ctx != nullptr);
m_timer_ctx = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(!m_trash_list_in_progress);
m_trash_list_in_progress = true;
}
return;
}
- ceph_assert(m_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_lock));
auto& deferment_end_time = spec.deferment_end_time;
dout(10) << "image_id=" << image_id << ", "
<< "deferment_end_time=" << deferment_end_time << dendl;
m_async_op_tracker.start_op();
auto ctx = new FunctionContext([this, image_id, deferment_end_time](int r) {
- m_trash_listener.handle_trash_image(image_id, deferment_end_time);
+ m_trash_listener.handle_trash_image(image_id,
+ deferment_end_time.to_real_time());
m_async_op_tracker.finish_op();
});
m_threads->work_queue->queue(ctx, 0);
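
The listener interface here switches from utime_t to ceph::real_clock::time_point, so call sites convert with utime_t's to_real_time() at the boundary. A sketch of the equivalent conversion using std::chrono stand-ins (LegacyTime is illustrative, not the tree's utime_t):

#include <chrono>
#include <cstdint>
#include <string>

using real_time = std::chrono::system_clock::time_point;

// LegacyTime is an illustrative stand-in for utime_t, which stores a
// seconds/nanoseconds pair and offers to_real_time() in the tree.
struct LegacyTime {
  uint32_t sec = 0;
  uint32_t nsec = 0;

  real_time to_real_time() const {
    auto d = std::chrono::seconds(sec) + std::chrono::nanoseconds(nsec);
    return real_time{
        std::chrono::duration_cast<std::chrono::system_clock::duration>(d)};
  }
};

struct TrashListenerLike {
  virtual ~TrashListenerLike() = default;
  // was: handle_trash_image(const std::string&, const utime_t&)
  virtual void handle_trash_image(const std::string& image_id,
                                  const real_time& deferment_end_time) = 0;
};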
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "librbd/TrashWatcher.h"
#include <set>
#include <string>
std::string m_last_image_id;
bufferlist m_out_bl;
- mutable Mutex m_lock;
+ mutable ceph::mutex m_lock;
Context *m_on_init_finish = nullptr;
Context *m_timer_ctx = nullptr;
}
virtual void handle_trash_image(const std::string& image_id,
- const utime_t& deferment_end_time) = 0;
+ const ceph::real_clock::time_point& deferment_end_time) = 0;
};
Policy::Policy(librados::IoCtx &ioctx)
: m_ioctx(ioctx),
- m_map_lock(unique_lock_name("rbd::mirror::image_map::Policy::m_map_lock",
- this)) {
+ m_map_lock(ceph::make_shared_mutex(
+ unique_lock_name("rbd::mirror::image_map::Policy::m_map_lock", this))) {
// map should at least have one instance
std::string instance_id = stringify(ioctx.get_instance_id());
const std::map<std::string, cls::rbd::MirrorImageMap> &image_mapping) {
dout(20) << dendl;
- RWLock::WLocker map_lock(m_map_lock);
+ std::unique_lock map_lock{m_map_lock};
for (auto& it : image_mapping) {
ceph_assert(!it.second.instance_id.empty());
auto map_result = m_map[it.second.instance_id].emplace(it.first);
LookupInfo Policy::lookup(const std::string &global_image_id) {
dout(20) << "global_image_id=" << global_image_id << dendl;
- RWLock::RLocker map_lock(m_map_lock);
+ std::shared_lock map_lock{m_map_lock};
LookupInfo info;
auto it = m_image_states.find(global_image_id);
bool Policy::add_image(const std::string &global_image_id) {
dout(5) << "global_image_id=" << global_image_id << dendl;
- RWLock::WLocker map_lock(m_map_lock);
+ std::unique_lock map_lock{m_map_lock};
auto image_state_result = m_image_states.emplace(global_image_id,
ImageState{});
auto& image_state = image_state_result.first->second;
bool Policy::remove_image(const std::string &global_image_id) {
dout(5) << "global_image_id=" << global_image_id << dendl;
- RWLock::WLocker map_lock(m_map_lock);
+ std::unique_lock map_lock{m_map_lock};
auto it = m_image_states.find(global_image_id);
if (it == m_image_states.end()) {
return false;
GlobalImageIds* global_image_ids) {
dout(5) << "instance_ids=" << instance_ids << dendl;
- RWLock::WLocker map_lock(m_map_lock);
+ std::unique_lock map_lock{m_map_lock};
for (auto& instance : instance_ids) {
ceph_assert(!instance.empty());
m_map.emplace(instance, std::set<std::string>{});
void Policy::remove_instances(const InstanceIds &instance_ids,
GlobalImageIds* global_image_ids) {
- RWLock::WLocker map_lock(m_map_lock);
+ std::unique_lock map_lock{m_map_lock};
remove_instances(m_map_lock, instance_ids, global_image_ids);
}
-void Policy::remove_instances(const RWLock& lock,
+void Policy::remove_instances(const ceph::shared_mutex& lock,
const InstanceIds &instance_ids,
GlobalImageIds* global_image_ids) {
- ceph_assert(m_map_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(m_map_lock));
dout(5) << "instance_ids=" << instance_ids << dendl;
for (auto& instance_id : instance_ids) {
}
ActionType Policy::start_action(const std::string &global_image_id) {
- RWLock::WLocker map_lock(m_map_lock);
+ std::unique_lock map_lock{m_map_lock};
auto it = m_image_states.find(global_image_id);
ceph_assert(it != m_image_states.end());
}
bool Policy::finish_action(const std::string &global_image_id, int r) {
- RWLock::WLocker map_lock(m_map_lock);
+ std::unique_lock map_lock{m_map_lock};
auto it = m_image_states.find(global_image_id);
ceph_assert(it != m_image_states.end());
}
void Policy::map(const std::string& global_image_id, ImageState* image_state) {
- ceph_assert(m_map_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(m_map_lock));
std::string instance_id = image_state->instance_id;
if (instance_id != UNMAPPED_INSTANCE_ID && !is_dead_instance(instance_id)) {
void Policy::unmap(const std::string &global_image_id,
ImageState* image_state) {
- ceph_assert(m_map_lock.is_wlocked());
+ ceph_assert(ceph_mutex_is_wlocked(m_map_lock));
std::string instance_id = image_state->instance_id;
if (instance_id == UNMAPPED_INSTANCE_ID) {
}
bool Policy::is_image_shuffling(const std::string &global_image_id) {
- ceph_assert(m_map_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_map_lock));
auto it = m_image_states.find(global_image_id);
ceph_assert(it != m_image_states.end());
}
bool Policy::can_shuffle_image(const std::string &global_image_id) {
- ceph_assert(m_map_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_map_lock));
CephContext *cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
int migration_throttle = cct->_conf.get_val<uint64_t>(
typedef std::map<std::string, std::set<std::string> > InstanceToImageMap;
bool is_dead_instance(const std::string instance_id) {
- ceph_assert(m_map_lock.is_locked());
+ ceph_assert(ceph_mutex_is_locked(m_map_lock));
return m_dead_instances.find(instance_id) != m_dead_instances.end();
}
librados::IoCtx &m_ioctx;
- RWLock m_map_lock; // protects m_map
+ ceph::shared_mutex m_map_lock; // protects m_map
InstanceToImageMap m_map; // instance_id -> global_id map
ImageStates m_image_states;
bool m_initial_update = true;
- void remove_instances(const RWLock& lock, const InstanceIds &instance_ids,
+ void remove_instances(const ceph::shared_mutex& lock,
+ const InstanceIds &instance_ids,
GlobalImageIds* global_image_ids);
bool set_state(ImageState* image_state, StateTransition::State state,
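
Policy's reader/writer split carries over directly: a ceph::shared_mutex with std::shared_lock for lookups and std::unique_lock for mutations. A compact sketch of the scheme (PolicyLike and its members are illustrative):

#include <map>
#include <set>
#include <shared_mutex>
#include <string>

class PolicyLike {
  mutable std::shared_mutex m_map_lock;                // protects m_map
  std::map<std::string, std::set<std::string>> m_map;  // instance -> images

public:
  bool is_instance_mapped(const std::string& instance_id) const {
    std::shared_lock map_lock{m_map_lock};   // many concurrent readers
    return m_map.count(instance_id) > 0;
  }

  void add_instance(const std::string& instance_id) {
    std::unique_lock map_lock{m_map_lock};   // exclusive writer
    m_map.emplace(instance_id, std::set<std::string>{});
  }
};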
m_remote_mirror_uuid(remote_mirror_uuid), m_journaler(journaler),
m_client_state(client_state), m_client_meta(client_meta),
m_progress_ctx(progress_ctx), m_do_resync(do_resync),
- m_lock(unique_lock_name("BootstrapRequest::m_lock", this)) {
+ m_lock(ceph::make_mutex(
+     unique_lock_name("BootstrapRequest::m_lock", this))) {
dout(10) << dendl;
}
template <typename I>
bool BootstrapRequest<I>::is_syncing() const {
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
return (m_image_sync != nullptr);
}
void BootstrapRequest<I>::cancel() {
dout(10) << dendl;
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_canceled = true;
if (m_image_sync != nullptr) {
I *local_image_ctx = (*m_local_image_ctx);
{
- local_image_ctx->image_lock.get_read();
+ local_image_ctx->image_lock.lock_shared();
if (local_image_ctx->journal == nullptr) {
- local_image_ctx->image_lock.put_read();
+ local_image_ctx->image_lock.unlock_shared();
derr << "local image does not support journaling" << dendl;
m_ret_val = -EINVAL;
r = (*m_local_image_ctx)->journal->is_resync_requested(m_do_resync);
if (r < 0) {
- local_image_ctx->image_lock.put_read();
+ local_image_ctx->image_lock.unlock_shared();
derr << "failed to check if a resync was requested" << dendl;
m_ret_val = r;
m_local_tag_data = local_image_ctx->journal->get_tag_data();
dout(10) << "local tag=" << m_local_tag_tid << ", "
<< "local tag data=" << m_local_tag_data << dendl;
- local_image_ctx->image_lock.put_read();
+ local_image_ctx->image_lock.unlock_shared();
}
if (m_local_tag_data.mirror_uuid != m_remote_mirror_uuid && !m_primary) {
dout(15) << "local_image_id=" << m_local_image_id << dendl;
update_progress("CREATE_LOCAL_IMAGE");
- m_remote_image_ctx->image_lock.get_read();
+ m_remote_image_ctx->image_lock.lock_shared();
std::string image_name = m_remote_image_ctx->name;
- m_remote_image_ctx->image_lock.put_read();
+ m_remote_image_ctx->image_lock.unlock_shared();
Context *ctx = create_context_callback<
BootstrapRequest<I>, &BootstrapRequest<I>::handle_create_local_image>(
}
{
- Mutex::Locker locker(m_lock);
+ std::unique_lock locker{m_lock};
if (m_canceled) {
m_ret_val = -ECANCELED;
} else {
m_image_sync->get();
- m_lock.Unlock();
+ locker.unlock();
update_progress("IMAGE_SYNC");
- m_lock.Lock();
+ locker.lock();
m_image_sync->send();
return;
dout(15) << "r=" << r << dendl;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
m_image_sync->put();
m_image_sync = nullptr;
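
Unlike Mutex::Locker, std::unique_lock can be dropped and reacquired mid-scope, which is what replaces the bare m_lock.Unlock()/m_lock.Lock() pair in the hunk above while preserving release on every exit path. A sketch:

#include <mutex>

struct BootstrapLike {
  std::mutex m_lock;

  void image_sync(void (*update_progress)(const char*)) {
    std::unique_lock locker{m_lock};
    // ... state examined under the lock ...
    locker.unlock();                // was: m_lock.Unlock()
    update_progress("IMAGE_SYNC");  // callback must run without the lock
    locker.lock();                  // was: m_lock.Lock()
    // ... continue under the lock; released when locker is destroyed
  }
};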
#include "include/int_types.h"
#include "include/rados/librados.hpp"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "cls/journal/cls_journal_types.h"
#include "librbd/journal/Types.h"
#include "librbd/journal/TypeTraits.h"
class Context;
class ContextWQ;
-class Mutex;
class SafeTimer;
namespace journal { class Journaler; }
namespace librbd { class ImageCtx; }
ProgressContext *m_progress_ctx;
bool *m_do_resync;
- mutable Mutex m_lock;
+ mutable ceph::mutex m_lock;
bool m_canceled = false;
Tags m_remote_tags;
Context *ctx = create_context_callback<
klass, &klass::handle_create_image>(this);
- RWLock::RLocker image_locker(m_remote_image_ctx->image_lock);
+ std::shared_lock image_locker{m_remote_image_ctx->image_lock};
auto& config{
reinterpret_cast<CephContext*>(m_local_io_ctx.cct())->_conf};
std::string snap_name;
cls::rbd::SnapshotNamespace snap_namespace;
{
- RWLock::RLocker remote_image_locker(m_remote_parent_image_ctx->image_lock);
+ std::shared_lock remote_image_locker{m_remote_parent_image_ctx->image_lock};
auto it = m_remote_parent_image_ctx->snap_info.find(
m_remote_parent_spec.snap_id);
if (it != m_remote_parent_image_ctx->snap_info.end()) {
template <typename I>
int CreateImageRequest<I>::validate_parent() {
- RWLock::RLocker owner_locker(m_remote_image_ctx->owner_lock);
- RWLock::RLocker image_locker(m_remote_image_ctx->image_lock);
+ std::shared_lock owner_locker{m_remote_image_ctx->owner_lock};
+ std::shared_lock image_locker{m_remote_image_ctx->image_lock};
m_remote_parent_spec = m_remote_image_ctx->parent_md.spec;
bool EventPreprocessor<I>::prune_snap_map(SnapSeqs *snap_seqs) {
bool pruned = false;
- RWLock::RLocker image_locker(m_local_image_ctx.image_lock);
+ std::shared_lock image_locker{m_local_image_ctx.image_lock};
for (auto it = snap_seqs->begin(); it != snap_seqs->end(); ) {
auto current_it(it++);
if (m_local_image_ctx.snap_info.count(current_it->second) == 0) {
int lock_requested(bool force) override {
int r = -EROFS;
{
- RWLock::RLocker owner_locker(image_ctx->owner_lock);
- RWLock::RLocker image_locker(image_ctx->image_lock);
+ std::shared_lock owner_locker{image_ctx->owner_lock};
+ std::shared_lock image_locker{image_ctx->image_lock};
if (image_ctx->journal == nullptr || image_ctx->journal->is_tag_owner()) {
r = 0;
}
*m_local_image_ctx = I::create("", m_local_image_id, nullptr,
m_local_io_ctx, false);
{
- RWLock::WLocker owner_locker((*m_local_image_ctx)->owner_lock);
- RWLock::WLocker image_locker((*m_local_image_ctx)->image_lock);
+ std::scoped_lock locker{(*m_local_image_ctx)->owner_lock,
+ (*m_local_image_ctx)->image_lock};
(*m_local_image_ctx)->set_exclusive_lock_policy(
new MirrorExclusiveLockPolicy<I>(*m_local_image_ctx));
(*m_local_image_ctx)->set_journal_policy(
void OpenLocalImageRequest<I>::send_lock_image() {
dout(20) << dendl;
- RWLock::RLocker owner_locker((*m_local_image_ctx)->owner_lock);
+ std::shared_lock owner_locker{(*m_local_image_ctx)->owner_lock};
if ((*m_local_image_ctx)->exclusive_lock == nullptr) {
derr << ": image does not support exclusive lock" << dendl;
send_close_image(-EINVAL);
}
{
- RWLock::RLocker owner_locker((*m_local_image_ctx)->owner_lock);
+ std::shared_lock owner_locker{(*m_local_image_ctx)->owner_lock};
if ((*m_local_image_ctx)->exclusive_lock == nullptr ||
!(*m_local_image_ctx)->exclusive_lock->is_lock_owner()) {
derr << ": image is not locked" << dendl;
const std::string &mirror_uuid)
: m_journaler(journaler),
m_mirror_uuid(mirror_uuid),
- m_lock(unique_lock_name("ReplayStatusFormatter::m_lock", this)) {
+ m_lock(ceph::make_mutex(
+     unique_lock_name("ReplayStatusFormatter::m_lock", this))) {
}
template <typename I>
bool in_progress = false;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
if (m_on_finish) {
in_progress = true;
} else {
format(description);
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
ceph_assert(m_on_finish == on_finish);
m_on_finish = nullptr;
}
m_tag_cache.find(master_tag_tid) != m_tag_cache.end()) {
Context *on_finish = nullptr;
{
- Mutex::Locker locker(m_lock);
+ std::lock_guard locker{m_lock};
std::swap(m_on_finish, on_finish);
}
#define RBD_MIRROR_IMAGE_REPLAYER_REPLAY_STATUS_FORMATTER_H
#include "include/Context.h"
-#include "common/Mutex.h"
+#include "common/ceph_mutex.h"
#include "cls/journal/cls_journal_types.h"
#include "librbd/journal/Types.h"
#include "librbd/journal/TypeTraits.h"
private:
Journaler *m_journaler;
std::string m_mirror_uuid;
- Mutex m_lock;
+ ceph::mutex m_lock;
Context *m_on_finish = nullptr;
cls::journal::ObjectPosition m_master_position;
cls::journal::ObjectPosition m_mirror_position;
} else {
// if we have more than one sync point or invalid sync points,
// trim them off
- RWLock::RLocker image_locker(m_remote_image_ctx->image_lock);
+ std::shared_lock image_locker{m_remote_image_ctx->image_lock};
std::set<std::string> snap_names;
for (auto it = m_client_meta_copy.sync_points.rbegin();
it != m_client_meta_copy.sync_points.rend(); ++it) {