From 489b30844e86dab102f768796251783b6e4a5f8f Mon Sep 17 00:00:00 2001 From: Patrick Donnelly Date: Wed, 14 Aug 2019 11:23:42 -0700 Subject: [PATCH] include: convert FunctionContext usage to generic LambdaContext The main motivation for this change is to avoid copies due to the use of boost::function/std::function where captures of std::unique_ptr (in subsequent commits) would fail to compile. Signed-off-by: Patrick Donnelly --- src/client/Client.cc | 2 +- src/common/Throttle.cc | 2 +- src/common/Throttle.h | 4 +- src/crimson/osd/pg.h | 4 +- src/include/Context.h | 25 +++------ src/journal/JournalMetadata.cc | 16 +++--- src/journal/JournalPlayer.cc | 4 +- src/journal/JournalRecorder.cc | 4 +- src/journal/JournalTrimmer.cc | 4 +- src/journal/Journaler.cc | 15 ++--- src/journal/ObjectPlayer.cc | 2 +- src/journal/ObjectRecorder.cc | 2 +- src/librbd/DeepCopyRequest.cc | 4 +- src/librbd/ExclusiveLock.cc | 8 +-- src/librbd/ImageState.cc | 4 +- src/librbd/ImageWatcher.cc | 20 +++---- src/librbd/Journal.cc | 28 +++++----- src/librbd/Journal.h | 2 +- src/librbd/ManagedLock.cc | 20 +++---- src/librbd/ObjectMap.cc | 2 +- src/librbd/Operations.cc | 4 +- src/librbd/Watcher.cc | 4 +- .../cache/ObjectCacherObjectDispatch.cc | 4 +- src/librbd/cache/ParentCacheObjectDispatch.cc | 8 +-- src/librbd/cache/WriteAroundObjectDispatch.cc | 4 +- src/librbd/deep_copy/ImageCopyRequest.cc | 2 +- src/librbd/deep_copy/ObjectCopyRequest.cc | 6 +- src/librbd/deep_copy/SetHeadRequest.cc | 8 +-- src/librbd/deep_copy/SnapshotCopyRequest.cc | 14 ++--- src/librbd/deep_copy/SnapshotCreateRequest.cc | 6 +- src/librbd/image/RemoveRequest.cc | 2 +- src/librbd/image/ValidatePoolRequest.cc | 4 +- src/librbd/internal.cc | 2 +- src/librbd/io/ImageRequest.cc | 2 +- src/librbd/io/ObjectDispatcher.cc | 6 +- src/librbd/io/ObjectRequest.cc | 2 +- .../io/SimpleSchedulerObjectDispatch.cc | 8 +-- src/librbd/journal/ObjectDispatch.cc | 2 +- src/librbd/journal/Replay.cc | 2 +- src/librbd/mirror/DisableRequest.cc | 4 +- src/mds/MDBalancer.cc | 6 +- src/mds/MDCache.cc | 16 +++--- src/mds/MDSDaemon.cc | 6 +- src/mds/MDSRank.cc | 32 +++++------ src/mds/OpenFileTable.cc | 4 +- src/mds/PurgeQueue.cc | 16 +++--- src/mds/Server.cc | 6 +- src/mgr/ActivePyModules.cc | 8 +-- src/mgr/BaseMgrModule.cc | 4 +- src/mgr/DaemonServer.cc | 8 +-- src/mgr/Mgr.cc | 4 +- src/mgr/MgrClient.cc | 4 +- src/mgr/MgrStandby.cc | 6 +- src/mgr/StandbyPyModules.cc | 2 +- src/mon/Elector.cc | 4 +- src/mon/MDSMonitor.cc | 4 +- src/mon/MgrMonitor.cc | 8 +-- src/mon/MonClient.cc | 4 +- src/mon/Monitor.cc | 54 ++++++++---------- src/mon/Monitor.h | 35 +++++++++--- src/mon/OSDMonitor.cc | 2 +- src/mon/Paxos.cc | 20 +++---- src/mon/PaxosService.cc | 4 +- src/mon/QuorumService.h | 4 +- src/osd/OSD.cc | 4 +- src/osd/PG.cc | 4 +- src/osd/PGBackend.cc | 2 +- src/osd/PrimaryLogPG.cc | 6 +- src/osdc/ObjectCacher.cc | 2 +- .../test_DomainSocket.cc | 2 +- .../test_multi_session.cc | 2 +- .../librados_test_stub/TestRadosClient.cc | 6 +- .../librados_test_stub/TestWatchNotify.cc | 12 ++-- .../cache/test_mock_ParentImageCache.cc | 14 ++--- .../deep_copy/test_mock_ObjectCopyRequest.cc | 3 +- .../deep_copy/test_mock_SetHeadRequest.cc | 3 +- .../test_mock_SnapshotCopyRequest.cc | 3 +- .../test_mock_SnapshotCreateRequest.cc | 3 +- src/test/librbd/journal/test_mock_Replay.cc | 2 +- .../librbd/operation/test_mock_Request.cc | 2 +- .../operation/test_mock_ResizeRequest.cc | 2 +- src/test/librbd/test_ObjectMap.cc | 2 +- src/test/librbd/test_mock_DeepCopyRequest.cc | 3 +- 
src/test/librbd/test_mock_Journal.cc | 2 +- src/test/librbd/test_mock_Watcher.cc | 4 +- .../watcher/test_mock_RewatchRequest.cc | 4 +- .../test_mock_SnapshotPurgeRequest.cc | 5 +- .../image_deleter/test_mock_TrashWatcher.cc | 2 +- src/test/rbd_mirror/test_mock_ImageMap.cc | 4 +- .../rbd_mirror/test_mock_InstanceReplayer.cc | 2 +- .../rbd_mirror/test_mock_InstanceWatcher.cc | 6 +- .../rbd_mirror/test_mock_LeaderWatcher.cc | 12 ++-- src/test/rbd_mirror/test_mock_PoolReplayer.cc | 6 +- src/test/rbd_mirror/test_mock_PoolWatcher.cc | 2 +- src/tools/ceph_objectstore_tool.cc | 2 +- .../immutable_object_cache/CacheClient.cc | 4 +- .../ObjectCacheStore.cc | 2 +- src/tools/rbd/action/MirrorPool.cc | 2 +- src/tools/rbd_mirror/ImageDeleter.cc | 14 ++--- src/tools/rbd_mirror/ImageMap.cc | 6 +- src/tools/rbd_mirror/ImageReplayer.cc | 56 +++++++++---------- src/tools/rbd_mirror/ImageSync.cc | 2 +- src/tools/rbd_mirror/InstanceReplayer.cc | 14 ++--- src/tools/rbd_mirror/InstanceWatcher.cc | 12 ++-- src/tools/rbd_mirror/Instances.cc | 6 +- src/tools/rbd_mirror/LeaderWatcher.cc | 10 ++-- src/tools/rbd_mirror/MirrorStatusWatcher.cc | 2 +- src/tools/rbd_mirror/NamespaceReplayer.cc | 24 ++++---- src/tools/rbd_mirror/PoolReplayer.cc | 10 ++-- src/tools/rbd_mirror/PoolReplayer.h | 4 +- src/tools/rbd_mirror/PoolWatcher.cc | 8 +-- src/tools/rbd_mirror/ServiceDaemon.cc | 2 +- .../image_deleter/SnapshotPurgeRequest.cc | 4 +- .../rbd_mirror/image_deleter/TrashWatcher.cc | 10 ++-- .../image_replayer/ReplayStatusFormatter.cc | 2 +- 115 files changed, 423 insertions(+), 426 deletions(-) diff --git a/src/client/Client.cc b/src/client/Client.cc index 23806511dddea..ef252ff5c89cc 100644 --- a/src/client/Client.cc +++ b/src/client/Client.cc @@ -6195,7 +6195,7 @@ void Client::tick() ldout(cct, 21) << "tick" << dendl; tick_event = timer.add_event_after( cct->_conf->client_tick_interval, - new FunctionContext([this](int) { + new LambdaContext([this](int) { // Called back via Timer, which takes client_lock for us ceph_assert(ceph_mutex_is_locked_by_me(client_lock)); tick(); diff --git a/src/common/Throttle.cc b/src/common/Throttle.cc index 70770fbe30128..50a835ee5f193 100644 --- a/src/common/Throttle.cc +++ b/src/common/Throttle.cc @@ -829,7 +829,7 @@ void TokenBucketThrottle::add_tokens() { } void TokenBucketThrottle::schedule_timer() { - m_token_ctx = new FunctionContext( + m_token_ctx = new LambdaContext( [this](int r) { schedule_timer(); }); diff --git a/src/common/Throttle.h b/src/common/Throttle.h index 5dfd92dc62d9c..c6537650451e7 100644 --- a/src/common/Throttle.h +++ b/src/common/Throttle.h @@ -363,7 +363,7 @@ class TokenBucketThrottle { uint64_t m_burst = 0; SafeTimer *m_timer; ceph::mutex *m_timer_lock; - FunctionContext *m_token_ctx = nullptr; + Context *m_token_ctx = nullptr; std::list m_blockers; ceph::mutex m_lock; @@ -419,7 +419,7 @@ public: template void add_blocker(uint64_t c, T *handler, I *item, uint64_t flag) { - Context *ctx = new FunctionContext([handler, item, flag](int r) { + Context *ctx = new LambdaContext([handler, item, flag](int r) { (handler->*MF)(r, item, flag); }); m_blockers.emplace_back(c, ctx); diff --git a/src/crimson/osd/pg.h b/src/crimson/osd/pg.h index 3111c9b30c778..e6aeff7e72f7b 100644 --- a/src/crimson/osd/pg.h +++ b/src/crimson/osd/pg.h @@ -170,7 +170,7 @@ public: void start_flush_on_transaction( ObjectStore::Transaction &t) final { t.register_on_commit( - new LambdaContext([this](){ + new LambdaContext([this](int r){ peering_state.complete_flush(); })); } @@ -217,7 +217,7 @@ public: 
PGPeeringEventRef on_commit) final { t.register_on_commit( new LambdaContext( - [this, on_commit=std::move(on_commit)] { + [this, on_commit=std::move(on_commit)](int r){ shard_services.start_operation( this, shard_services, diff --git a/src/include/Context.h b/src/include/Context.h index 8a1d8e8516be7..6d39be55ba1a2 100644 --- a/src/include/Context.h +++ b/src/include/Context.h @@ -122,13 +122,16 @@ struct RunOnDelete { typedef std::shared_ptr RunOnDeleteRef; template -struct LambdaContext : public Context { - T t; +class LambdaContext : public Context { +public: LambdaContext(T &&t) : t(std::forward(t)) {} - void finish(int) override { - t(); + void finish(int r) override { + t(r); } +private: + T t; }; + template LambdaContext *make_lambda_context(T &&t) { return new LambdaContext(std::move(t)); @@ -473,20 +476,6 @@ private: typedef C_GatherBase C_Gather; typedef C_GatherBuilderBase C_GatherBuilder; -class FunctionContext : public Context { -public: - FunctionContext(boost::function &&callback) - : m_callback(std::move(callback)) - { - } - - void finish(int r) override { - m_callback(r); - } -private: - boost::function m_callback; -}; - template class ContextFactory { public: diff --git a/src/journal/JournalMetadata.cc b/src/journal/JournalMetadata.cc index 7fcad836c19c2..bf9c21be1748d 100644 --- a/src/journal/JournalMetadata.cc +++ b/src/journal/JournalMetadata.cc @@ -433,7 +433,7 @@ void JournalMetadata::init(Context *on_finish) { on_finish = utils::create_async_context_callback( this, on_finish); on_finish = new C_ImmutableMetadata(this, on_finish); - on_finish = new FunctionContext([this, on_finish](int r) { + on_finish = new LambdaContext([this, on_finish](int r) { if (r < 0) { lderr(m_cct) << __func__ << ": failed to watch journal" << cpp_strerror(r) << dendl; @@ -467,11 +467,11 @@ void JournalMetadata::shut_down(Context *on_finish) { // chain the shut down sequence (reverse order) on_finish = utils::create_async_context_callback( this, on_finish); - on_finish = new FunctionContext([this, on_finish](int r) { + on_finish = new LambdaContext([this, on_finish](int r) { ldout(m_cct, 20) << "shut_down: waiting for ops" << dendl; m_async_op_tracker.wait_for_ops(on_finish); }); - on_finish = new FunctionContext([this, on_finish](int r) { + on_finish = new LambdaContext([this, on_finish](int r) { ldout(m_cct, 20) << "shut_down: flushing watch" << dendl; librados::Rados rados(m_ioctx); librados::AioCompletion *comp = librados::Rados::aio_create_completion( @@ -480,7 +480,7 @@ void JournalMetadata::shut_down(Context *on_finish) { ceph_assert(r == 0); comp->release(); }); - on_finish = new FunctionContext([this, on_finish](int r) { + on_finish = new LambdaContext([this, on_finish](int r) { flush_commit_position(on_finish); }); if (watch_handle != 0) { @@ -835,7 +835,7 @@ void JournalMetadata::handle_commit_position_task() { m_async_op_tracker.start_op(); ++m_flush_commits_in_progress; - Context* ctx = new FunctionContext([this, commit_position_ctx](int r) { + Context* ctx = new LambdaContext([this, commit_position_ctx](int r) { Contexts flush_commit_position_ctxs; m_lock.lock(); ceph_assert(m_flush_commits_in_progress > 0); @@ -852,7 +852,7 @@ void JournalMetadata::handle_commit_position_task() { m_async_op_tracker.finish_op(); }); ctx = new C_NotifyUpdate(this, ctx); - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { // manually kick of a refresh in case the notification is missed // and ignore the next notification that we are about to send 
m_lock.lock(); @@ -861,7 +861,7 @@ void JournalMetadata::handle_commit_position_task() { refresh(ctx); }); - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { schedule_laggy_clients_disconnect(ctx); }); @@ -1118,7 +1118,7 @@ void JournalMetadata::schedule_laggy_clients_disconnect(Context *on_finish) { ldout(m_cct, 1) << __func__ << ": " << client_id << ": scheduling disconnect" << dendl; - ctx = new FunctionContext([this, client_id, ctx](int r1) { + ctx = new LambdaContext([this, client_id, ctx](int r1) { ldout(m_cct, 10) << __func__ << ": " << client_id << ": flagging disconnected" << dendl; diff --git a/src/journal/JournalPlayer.cc b/src/journal/JournalPlayer.cc index 9033c93f2fb70..811508bf03c70 100644 --- a/src/journal/JournalPlayer.cc +++ b/src/journal/JournalPlayer.cc @@ -716,7 +716,7 @@ void JournalPlayer::schedule_watch(bool immediate) { << *m_active_tag_tid << dendl; m_async_op_tracker.start_op(); - FunctionContext *ctx = new FunctionContext([this](int r) { + auto ctx = new LambdaContext([this](int r) { handle_watch_assert_active(r); }); m_journal_metadata->assert_active_tag(*m_active_tag_tid, ctx); @@ -856,7 +856,7 @@ void JournalPlayer::handle_cache_rebalanced(uint64_t new_cache_bytes) { m_state = STATE_INIT; if (m_max_fetch_bytes >= min_bytes) { m_async_op_tracker.start_op(); - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [this](int r) { prefetch(); m_async_op_tracker.finish_op(); diff --git a/src/journal/JournalRecorder.cc b/src/journal/JournalRecorder.cc index 977b9b4f39456..9629d9f735f90 100644 --- a/src/journal/JournalRecorder.cc +++ b/src/journal/JournalRecorder.cc @@ -88,14 +88,14 @@ JournalRecorder::~JournalRecorder() { } void JournalRecorder::shut_down(Context *on_safe) { - on_safe = new FunctionContext( + on_safe = new LambdaContext( [this, on_safe](int r) { Context *ctx = nullptr; { std::lock_guard locker{m_lock}; if (m_in_flight_advance_sets != 0) { ceph_assert(m_on_object_set_advanced == nullptr); - m_on_object_set_advanced = new FunctionContext( + m_on_object_set_advanced = new LambdaContext( [on_safe, r](int) { on_safe->complete(r); }); diff --git a/src/journal/JournalTrimmer.cc b/src/journal/JournalTrimmer.cc index 84bc7e79cae26..d091243b388a0 100644 --- a/src/journal/JournalTrimmer.cc +++ b/src/journal/JournalTrimmer.cc @@ -57,7 +57,7 @@ void JournalTrimmer::shut_down(Context *on_finish) { m_journal_metadata->remove_listener(&m_metadata_listener); // chain the shut down sequence (reverse order) - on_finish = new FunctionContext([this, on_finish](int r) { + on_finish = new LambdaContext([this, on_finish](int r) { m_async_op_tracker.wait_for_ops(on_finish); }); m_journal_metadata->flush_commit_position(on_finish); @@ -66,7 +66,7 @@ void JournalTrimmer::shut_down(Context *on_finish) { void JournalTrimmer::remove_objects(bool force, Context *on_finish) { ldout(m_cct, 20) << __func__ << dendl; - on_finish = new FunctionContext([this, force, on_finish](int r) { + on_finish = new LambdaContext([this, force, on_finish](int r) { std::lock_guard locker{m_lock}; if (m_remove_set_pending) { diff --git a/src/journal/Journaler.cc b/src/journal/Journaler.cc index 51a08d5be818e..6190674ade74e 100644 --- a/src/journal/Journaler.cc +++ b/src/journal/Journaler.cc @@ -180,7 +180,7 @@ void Journaler::shut_down(Context *on_finish) { std::swap(metadata, m_metadata); ceph_assert(metadata != nullptr); - on_finish = new FunctionContext([metadata, on_finish](int r) { + on_finish = new LambdaContext([metadata, on_finish](int r) 
{ metadata->put(); on_finish->complete(0); }); @@ -192,7 +192,7 @@ void Journaler::shut_down(Context *on_finish) { return; } - on_finish = new FunctionContext([trimmer, metadata, on_finish](int r) { + on_finish = new LambdaContext([trimmer, metadata, on_finish](int r) { delete trimmer; metadata->shut_down(on_finish); }); @@ -241,7 +241,7 @@ void Journaler::create(uint8_t order, uint8_t splay_width, void Journaler::remove(bool force, Context *on_finish) { // chain journal removal (reverse order) - on_finish = new FunctionContext([this, on_finish](int r) { + on_finish = new LambdaContext([this, on_finish](int r) { librados::AioCompletion *comp = librados::Rados::aio_create_completion( on_finish, nullptr, utils::rados_ctx_callback); r = m_header_ioctx.aio_remove(m_header_oid, comp); @@ -249,7 +249,7 @@ void Journaler::remove(bool force, Context *on_finish) { comp->release(); }); - on_finish = new FunctionContext([this, force, on_finish](int r) { + on_finish = new LambdaContext([this, force, on_finish](int r) { m_trimmer->remove_objects(force, on_finish); }); @@ -376,10 +376,11 @@ void Journaler::stop_replay(Context *on_finish) { std::swap(player, m_player); ceph_assert(player != nullptr); - on_finish = new FunctionContext([player, on_finish](int r) { + auto f = [player, on_finish](int r) { delete player; on_finish->complete(r); - }); + }; + on_finish = new LambdaContext(std::move(f)); player->shut_down(on_finish); } @@ -414,7 +415,7 @@ void Journaler::stop_append(Context *on_safe) { std::swap(recorder, m_recorder); ceph_assert(recorder != nullptr); - on_safe = new FunctionContext([recorder, on_safe](int r) { + on_safe = new LambdaContext([recorder, on_safe](int r) { delete recorder; on_safe->complete(r); }); diff --git a/src/journal/ObjectPlayer.cc b/src/journal/ObjectPlayer.cc index 46f615002e0f2..17bb8574a562b 100644 --- a/src/journal/ObjectPlayer.cc +++ b/src/journal/ObjectPlayer.cc @@ -283,7 +283,7 @@ void ObjectPlayer::schedule_watch() { ceph_assert(m_watch_task == nullptr); m_watch_task = m_timer.add_event_after( m_watch_interval, - new FunctionContext([this](int) { + new LambdaContext([this](int) { handle_watch_task(); })); } diff --git a/src/journal/ObjectRecorder.cc b/src/journal/ObjectRecorder.cc index 4bb6d03c18613..9f1f37eed1cd0 100644 --- a/src/journal/ObjectRecorder.cc +++ b/src/journal/ObjectRecorder.cc @@ -112,7 +112,7 @@ void ObjectRecorder::flush(Context *on_safe) { if (future.is_valid()) { // cannot be invoked while the same lock context - m_op_work_queue->queue(new FunctionContext( + m_op_work_queue->queue(new LambdaContext( [future, on_safe] (int r) mutable { future.flush(on_safe); })); diff --git a/src/librbd/DeepCopyRequest.cc b/src/librbd/DeepCopyRequest.cc index 64615685202a8..c96fa93bed65f 100644 --- a/src/librbd/DeepCopyRequest.cc +++ b/src/librbd/DeepCopyRequest.cc @@ -226,7 +226,7 @@ void DeepCopyRequest::send_copy_object_map() { } // rollback the object map (copy snapshot object map to HEAD) - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_copy_object_map(r); finish_op_ctx->complete(0); }); @@ -269,7 +269,7 @@ void DeepCopyRequest::send_refresh_object_map() { ldout(m_cct, 20) << dendl; - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_refresh_object_map(r); finish_op_ctx->complete(0); }); diff --git a/src/librbd/ExclusiveLock.cc b/src/librbd/ExclusiveLock.cc index 3d3ae93ca3fb9..84d137977be63 100644 
--- a/src/librbd/ExclusiveLock.cc +++ b/src/librbd/ExclusiveLock.cc @@ -147,7 +147,7 @@ Context *ExclusiveLock::start_op(int* ret_val) { } m_async_op_tracker.start_op(); - return new FunctionContext([this](int r) { + return new LambdaContext([this](int r) { m_async_op_tracker.finish_op(); }); } @@ -202,7 +202,7 @@ void ExclusiveLock::pre_acquire_lock_handler(Context *on_finish) { PreAcquireRequest *req = PreAcquireRequest::create(m_image_ctx, on_finish); - m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) { + m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) { req->send(); })); } @@ -252,7 +252,7 @@ void ExclusiveLock::post_acquire_lock_handler(int r, Context *on_finish) { util::create_context_callback(this), util::create_context_callback(this)); - m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) { + m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) { req->send(); })); } @@ -301,7 +301,7 @@ void ExclusiveLock::pre_release_lock_handler(bool shutting_down, PreReleaseRequest *req = PreReleaseRequest::create( m_image_ctx, shutting_down, m_async_op_tracker, on_finish); - m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) { + m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) { req->send(); })); } diff --git a/src/librbd/ImageState.cc b/src/librbd/ImageState.cc index acdfaeea9cde8..9cfa2141670b7 100644 --- a/src/librbd/ImageState.cc +++ b/src/librbd/ImageState.cc @@ -44,7 +44,7 @@ public: { std::lock_guard locker{m_lock}; if (!m_in_flight.empty()) { - Context *ctx = new FunctionContext( + Context *ctx = new LambdaContext( [this, on_finish](int r) { ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": completing flush" << dendl; @@ -130,7 +130,7 @@ public: m_in_flight.insert(handle); - Context *ctx = new FunctionContext( + Context *ctx = new LambdaContext( [this, handle, watcher](int r) { handle_notify(handle, watcher); }); diff --git a/src/librbd/ImageWatcher.cc b/src/librbd/ImageWatcher.cc index da5a39fdf8450..d29c72400af5a 100644 --- a/src/librbd/ImageWatcher.cc +++ b/src/librbd/ImageWatcher.cc @@ -88,7 +88,7 @@ void ImageWatcher::unregister_watch(Context *on_finish) { cancel_async_requests(); - FunctionContext *ctx = new FunctionContext([this, on_finish](int r) { + auto ctx = new LambdaContext([this, on_finish](int r) { m_task_finisher->cancel_all(on_finish); }); Watcher::unregister_watch(ctx); @@ -99,7 +99,7 @@ void ImageWatcher::block_notifies(Context *on_finish) { CephContext *cct = m_image_ctx.cct; ldout(cct, 10) << this << " " << __func__ << dendl; - on_finish = new FunctionContext([this, on_finish](int r) { + on_finish = new LambdaContext([this, on_finish](int r) { cancel_async_requests(); on_finish->complete(r); }); @@ -109,7 +109,7 @@ void ImageWatcher::block_notifies(Context *on_finish) { template void ImageWatcher::schedule_async_progress(const AsyncRequestId &request, uint64_t offset, uint64_t total) { - FunctionContext *ctx = new FunctionContext( + auto ctx = new LambdaContext( boost::bind(&ImageWatcher::notify_async_progress, this, request, offset, total)); m_task_finisher->queue(Task(TASK_CODE_ASYNC_PROGRESS, request), ctx); @@ -129,7 +129,7 @@ int ImageWatcher::notify_async_progress(const AsyncRequestId &request, template void ImageWatcher::schedule_async_complete(const AsyncRequestId &request, int r) { - FunctionContext *ctx = new FunctionContext( + auto ctx = new LambdaContext( boost::bind(&ImageWatcher::notify_async_complete, this, request, r)); m_task_finisher->queue(ctx); } @@ 
-141,7 +141,7 @@ void ImageWatcher::notify_async_complete(const AsyncRequestId &request, << request << " = " << r << dendl; send_notify(AsyncCompletePayload(request, r), - new FunctionContext(boost::bind(&ImageWatcher::handle_async_complete, + new LambdaContext(boost::bind(&ImageWatcher::handle_async_complete, this, request, r, _1))); } @@ -331,7 +331,7 @@ void ImageWatcher::notify_header_update(librados::IoCtx &io_ctx, template void ImageWatcher::schedule_cancel_async_requests() { - FunctionContext *ctx = new FunctionContext( + auto ctx = new LambdaContext( boost::bind(&ImageWatcher::cancel_async_requests, this)); m_task_finisher->queue(TASK_CODE_CANCEL_ASYNC_REQUESTS, ctx); } @@ -401,7 +401,7 @@ void ImageWatcher::schedule_request_lock(bool use_timer, int timer_delay) { if (this->is_registered(this->m_watch_lock)) { ldout(m_image_ctx.cct, 15) << this << " requesting exclusive lock" << dendl; - FunctionContext *ctx = new FunctionContext( + auto ctx = new LambdaContext( boost::bind(&ImageWatcher::notify_request_lock, this)); if (use_timer) { if (timer_delay < 0) { @@ -499,7 +499,7 @@ void ImageWatcher::schedule_async_request_timed_out(const AsyncRequestId &id) ldout(m_image_ctx.cct, 20) << "scheduling async request time out: " << id << dendl; - Context *ctx = new FunctionContext(boost::bind( + Context *ctx = new LambdaContext(boost::bind( &ImageWatcher::async_request_timed_out, this, id)); Task task(TASK_CODE_ASYNC_REQUEST, id); @@ -530,7 +530,7 @@ void ImageWatcher::notify_async_request(const AsyncRequestId &async_request_i ldout(m_image_ctx.cct, 10) << this << " async request: " << async_request_id << dendl; - Context *on_notify = new FunctionContext([this, async_request_id](int r) { + Context *on_notify = new LambdaContext([this, async_request_id](int r) { if (r < 0) { // notification failed -- don't expect updates Context *on_complete = remove_async_request(async_request_id); @@ -540,7 +540,7 @@ void ImageWatcher::notify_async_request(const AsyncRequestId &async_request_i } }); - Context *on_complete = new FunctionContext( + Context *on_complete = new LambdaContext( [this, async_request_id, on_finish](int r) { m_task_finisher->cancel(Task(TASK_CODE_ASYNC_REQUEST, async_request_id)); on_finish->complete(r); diff --git a/src/librbd/Journal.cc b/src/librbd/Journal.cc index 1e4eee18172c0..06d757673d554 100644 --- a/src/librbd/Journal.cc +++ b/src/librbd/Journal.cc @@ -92,7 +92,7 @@ struct C_IsTagOwner : public Context { Journaler *journaler = this->journaler; Context *on_finish = this->on_finish; - FunctionContext *ctx = new FunctionContext( + auto ctx = new LambdaContext( [journaler, on_finish](int r) { on_finish->complete(r); delete journaler; @@ -168,7 +168,7 @@ struct GetTagsRequest { void send_get_client() { ldout(cct, 20) << __func__ << dendl; - FunctionContext *ctx = new FunctionContext( + auto ctx = new LambdaContext( [this](int r) { handle_get_client(r); }); @@ -210,7 +210,7 @@ struct GetTagsRequest { void send_get_tags() { ldout(cct, 20) << __func__ << dendl; - FunctionContext *ctx = new FunctionContext( + auto ctx = new LambdaContext( [this](int r) { handle_get_tags(r); }); @@ -583,9 +583,9 @@ void Journal::close(Context *on_finish) { CephContext *cct = m_image_ctx.cct; ldout(cct, 20) << this << " " << __func__ << dendl; - on_finish = new FunctionContext([this, on_finish](int r) { + on_finish = new LambdaContext([this, on_finish](int r) { // remove our handler from object dispatcher chain - preserve error - auto ctx = new FunctionContext([on_finish, r](int _) { + auto ctx = new 
LambdaContext([on_finish, r](int _) { on_finish->complete(r); }); m_image_ctx.io_object_dispatcher->shut_down_object_dispatch( @@ -896,7 +896,7 @@ void Journal::append_op_event(uint64_t op_tid, } on_safe = create_async_context_callback(m_image_ctx, on_safe); - on_safe = new FunctionContext([this, on_safe](int r) { + on_safe = new LambdaContext([this, on_safe](int r) { // ensure all committed IO before this op is committed m_journaler->flush_commit_position(on_safe); }); @@ -1013,7 +1013,7 @@ void Journal::start_external_replay(journal::Replay **journal_replay, ceph_assert(m_journal_replay == nullptr); on_start = util::create_async_context_callback(m_image_ctx, on_start); - on_start = new FunctionContext( + on_start = new LambdaContext( [this, journal_replay, on_start](int r) { handle_start_external_replay(r, journal_replay, on_start); }); @@ -1123,7 +1123,7 @@ void Journal::destroy_journaler(int r) { Context *ctx = create_async_context_callback( m_image_ctx, create_context_callback< Journal, &Journal::handle_journal_destroyed>(this)); - ctx = new FunctionContext( + ctx = new LambdaContext( [this, ctx](int r) { std::lock_guard locker{m_lock}; m_journaler->shut_down(ctx); @@ -1288,7 +1288,7 @@ void Journal::handle_replay_complete(int r) { } } - Context *ctx = new FunctionContext([this, cct](int r) { + Context *ctx = new LambdaContext([this, cct](int r) { ldout(cct, 20) << this << " handle_replay_complete: " << "handle shut down replay" << dendl; @@ -1306,11 +1306,11 @@ void Journal::handle_replay_complete(int r) { handle_flushing_replay(); } }); - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { // ensure the commit position is flushed to disk m_journaler->flush_commit_position(ctx); }); - ctx = new FunctionContext([this, cct, cancel_ops, ctx](int r) { + ctx = new LambdaContext([this, cct, cancel_ops, ctx](int r) { ldout(cct, 20) << this << " handle_replay_complete: " << "shut down replay" << dendl; m_journal_replay->shut_down(cancel_ops, ctx); @@ -1358,11 +1358,11 @@ void Journal::handle_replay_process_safe(ReplayEntry replay_entry, int r) { // stop replay, shut down, and restart Context* ctx = create_context_callback< Journal, &Journal::handle_flushing_restart>(this); - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { // ensure the commit position is flushed to disk m_journaler->flush_commit_position(ctx); }); - ctx = new FunctionContext([this, cct, ctx](int r) { + ctx = new LambdaContext([this, cct, ctx](int r) { ldout(cct, 20) << this << " handle_replay_process_safe: " << "shut down replay" << dendl; { @@ -1685,7 +1685,7 @@ void Journal::handle_metadata_updated() { // pull the most recent tags from the journal, decode, and // update the internal tag state C_RefreshTags *refresh_ctx = new C_RefreshTags(m_async_journal_op_tracker); - refresh_ctx->on_finish = new FunctionContext( + refresh_ctx->on_finish = new LambdaContext( [this, refresh_sequence, refresh_ctx](int r) { handle_refresh_metadata(refresh_sequence, refresh_ctx->tag_tid, refresh_ctx->tag_data, r); diff --git a/src/librbd/Journal.h b/src/librbd/Journal.h index 7213120d62fbc..a467178a92282 100644 --- a/src/librbd/Journal.h +++ b/src/librbd/Journal.h @@ -310,7 +310,7 @@ private: MetadataListener(Journal *journal) : journal(journal) { } void handle_update(::journal::JournalMetadata *) override { - FunctionContext *ctx = new FunctionContext([this](int r) { + auto ctx = new LambdaContext([this](int r) { journal->handle_metadata_updated(); }); 
journal->m_work_queue->queue(ctx, 0); diff --git a/src/librbd/ManagedLock.cc b/src/librbd/ManagedLock.cc index 28867a98733e3..ba97180b0b0c0 100644 --- a/src/librbd/ManagedLock.cc +++ b/src/librbd/ManagedLock.cc @@ -492,7 +492,7 @@ void ManagedLock::send_acquire_lock() { m_state = STATE_ACQUIRING; m_cookie = encode_lock_cookie(watch_handle); - m_work_queue->queue(new FunctionContext([this](int r) { + m_work_queue->queue(new LambdaContext([this](int r) { pre_acquire_lock_handler(create_context_callback< ManagedLock, &ManagedLock::handle_pre_acquire_lock>(this)); })); @@ -531,7 +531,7 @@ void ManagedLock::handle_acquire_lock(int r) { m_post_next_state = (r < 0 ? STATE_UNLOCKED : STATE_LOCKED); - m_work_queue->queue(new FunctionContext([this, r](int ret) { + m_work_queue->queue(new LambdaContext([this, r](int ret) { post_acquire_lock_handler(r, create_context_callback< ManagedLock, &ManagedLock::handle_post_acquire_lock>(this)); })); @@ -559,7 +559,7 @@ void ManagedLock::revert_to_unlock_state(int r) { using managed_lock::ReleaseRequest; ReleaseRequest* req = ReleaseRequest::create(m_ioctx, m_watcher, m_work_queue, m_oid, m_cookie, - new FunctionContext([this, r](int ret) { + new LambdaContext([this, r](int ret) { std::lock_guard locker{m_lock}; ceph_assert(ret == 0); complete_active_action(STATE_UNLOCKED, r); @@ -604,7 +604,7 @@ void ManagedLock::send_reacquire_lock() { auto ctx = create_context_callback< ManagedLock, &ManagedLock::handle_reacquire_lock>(this); - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { post_reacquire_lock_handler(r, ctx); }); @@ -681,7 +681,7 @@ void ManagedLock::send_release_lock() { ldout(m_cct, 10) << dendl; m_state = STATE_PRE_RELEASING; - m_work_queue->queue(new FunctionContext([this](int r) { + m_work_queue->queue(new LambdaContext([this](int r) { pre_release_lock_handler(false, create_context_callback< ManagedLock, &ManagedLock::handle_pre_release_lock>(this)); })); @@ -724,7 +724,7 @@ void ManagedLock::handle_release_lock(int r) { m_post_next_state = STATE_LOCKED; } - m_work_queue->queue(new FunctionContext([this, r](int ret) { + m_work_queue->queue(new LambdaContext([this, r](int ret) { post_release_lock_handler(false, r, create_context_callback< ManagedLock, &ManagedLock::handle_post_release_lock>(this)); })); @@ -744,7 +744,7 @@ void ManagedLock::send_shutdown() { ceph_assert(ceph_mutex_is_locked(m_lock)); if (m_state == STATE_UNLOCKED) { m_state = STATE_SHUTTING_DOWN; - m_work_queue->queue(new FunctionContext([this](int r) { + m_work_queue->queue(new LambdaContext([this](int r) { shutdown_handler(r, create_context_callback< ManagedLock, &ManagedLock::handle_shutdown>(this)); })); @@ -772,7 +772,7 @@ void ManagedLock::send_shutdown_release() { std::lock_guard locker{m_lock}; - m_work_queue->queue(new FunctionContext([this](int r) { + m_work_queue->queue(new LambdaContext([this](int r) { pre_release_lock_handler(true, create_context_callback< ManagedLock, &ManagedLock::handle_shutdown_pre_release>(this)); })); @@ -794,7 +794,7 @@ void ManagedLock::handle_shutdown_pre_release(int r) { using managed_lock::ReleaseRequest; ReleaseRequest* req = ReleaseRequest::create(m_ioctx, m_watcher, m_work_queue, m_oid, cookie, - new FunctionContext([this, r](int l) { + new LambdaContext([this, r](int l) { int rst = r < 0 ? 
r : l; post_release_lock_handler(true, rst, create_context_callback< ManagedLock, &ManagedLock::handle_shutdown_post_release>(this)); @@ -814,7 +814,7 @@ template void ManagedLock::wait_for_tracked_ops(int r) { ldout(m_cct, 10) << "r=" << r << dendl; - Context *ctx = new FunctionContext([this, r](int ret) { + Context *ctx = new LambdaContext([this, r](int ret) { complete_shutdown(r); }); diff --git a/src/librbd/ObjectMap.cc b/src/librbd/ObjectMap.cc index b7f3f06e640ae..fb09e3d989a02 100644 --- a/src/librbd/ObjectMap.cc +++ b/src/librbd/ObjectMap.cc @@ -273,7 +273,7 @@ void ObjectMap::detained_aio_update(UpdateOperation &&op) { ldout(cct, 20) << "in-flight update cell: " << cell << dendl; Context *on_finish = op.on_finish; - Context *ctx = new FunctionContext([this, cell, on_finish](int r) { + Context *ctx = new LambdaContext([this, cell, on_finish](int r) { handle_detained_aio_update(cell, r, on_finish); }); aio_update(CEPH_NOSNAP, op.start_object_no, op.end_object_no, op.new_state, diff --git a/src/librbd/Operations.cc b/src/librbd/Operations.cc index cfb9b4467f5f3..a327f9447bbc6 100644 --- a/src/librbd/Operations.cc +++ b/src/librbd/Operations.cc @@ -588,13 +588,13 @@ void Operations::execute_rename(const std::string &dest_name, if (m_image_ctx.old_format) { // unregister watch before and register back after rename on_finish = new C_NotifyUpdate(m_image_ctx, on_finish); - on_finish = new FunctionContext([this, on_finish](int r) { + on_finish = new LambdaContext([this, on_finish](int r) { if (m_image_ctx.old_format) { m_image_ctx.image_watcher->set_oid(m_image_ctx.header_oid); } m_image_ctx.image_watcher->register_watch(on_finish); }); - on_finish = new FunctionContext([this, dest_name, on_finish](int r) { + on_finish = new LambdaContext([this, dest_name, on_finish](int r) { std::shared_lock owner_locker{m_image_ctx.owner_lock}; operation::RenameRequest *req = new operation::RenameRequest( m_image_ctx, on_finish, dest_name); diff --git a/src/librbd/Watcher.cc b/src/librbd/Watcher.cc index f13e02d5834f2..0014cdf3d9149 100644 --- a/src/librbd/Watcher.cc +++ b/src/librbd/Watcher.cc @@ -162,7 +162,7 @@ void Watcher::unregister_watch(Context *on_finish) { << dendl; ceph_assert(m_unregister_watch_ctx == nullptr); - m_unregister_watch_ctx = new FunctionContext([this, on_finish](int r) { + m_unregister_watch_ctx = new LambdaContext([this, on_finish](int r) { unregister_watch(on_finish); }); return; @@ -234,7 +234,7 @@ void Watcher::handle_error(uint64_t handle, int err) { m_watch_blacklisted = true; } - FunctionContext *ctx = new FunctionContext( + auto ctx = new LambdaContext( boost::bind(&Watcher::rewatch, this)); m_work_queue->queue(ctx); } diff --git a/src/librbd/cache/ObjectCacherObjectDispatch.cc b/src/librbd/cache/ObjectCacherObjectDispatch.cc index 8343dca5d5a72..26a75856cb6e3 100644 --- a/src/librbd/cache/ObjectCacherObjectDispatch.cc +++ b/src/librbd/cache/ObjectCacherObjectDispatch.cc @@ -162,7 +162,7 @@ void ObjectCacherObjectDispatch::shut_down(Context* on_finish) { // chain shut down in reverse order // shut down the cache - on_finish = new FunctionContext([this, on_finish](int r) { + on_finish = new LambdaContext([this, on_finish](int r) { m_object_cacher->stop(); on_finish->complete(r); }); @@ -235,7 +235,7 @@ bool ObjectCacherObjectDispatch::discard( // discard the cache state after changes are committed to disk (and to // prevent races w/ readahead) auto ctx = *on_finish; - *on_finish = new FunctionContext( + *on_finish = new LambdaContext( [this, object_extents, ctx](int r) { 
m_cache_lock.lock(); m_object_cacher->discard_set(m_object_set, object_extents); diff --git a/src/librbd/cache/ParentCacheObjectDispatch.cc b/src/librbd/cache/ParentCacheObjectDispatch.cc index 87fba3c560b46..23edaa0576a6a 100644 --- a/src/librbd/cache/ParentCacheObjectDispatch.cc +++ b/src/librbd/cache/ParentCacheObjectDispatch.cc @@ -50,7 +50,7 @@ void ParentCacheObjectDispatch::init(Context* on_finish) { return; } - Context* create_session_ctx = new FunctionContext([this, on_finish](int ret) { + Context* create_session_ctx = new LambdaContext([this, on_finish](int ret) { m_connecting.store(false); if (on_finish != nullptr) { on_finish->complete(ret); @@ -90,7 +90,7 @@ bool ParentCacheObjectDispatch::read( * So, we need to check if session is normal again. If session work, * we need set m_connecting to false. */ if (!m_cache_client->is_session_work()) { - Context* on_finish = new FunctionContext([this](int ret) { + Context* on_finish = new LambdaContext([this](int ret) { m_connecting.store(false); }); create_cache_session(on_finish, true); @@ -167,7 +167,7 @@ int ParentCacheObjectDispatch::create_cache_session(Context* on_finish, bool auto cct = m_image_ctx->cct; ldout(cct, 20) << dendl; - Context* register_ctx = new FunctionContext([this, cct, on_finish](int ret) { + Context* register_ctx = new LambdaContext([this, cct, on_finish](int ret) { if (ret < 0) { lderr(cct) << "Parent cache fail to register client." << dendl; } else { @@ -177,7 +177,7 @@ int ParentCacheObjectDispatch::create_cache_session(Context* on_finish, bool on_finish->complete(ret); }); - Context* connect_ctx = new FunctionContext( + Context* connect_ctx = new LambdaContext( [this, cct, register_ctx](int ret) { if (ret < 0) { lderr(cct) << "Parent cache fail to connect RO daeomn." 
<< dendl; diff --git a/src/librbd/cache/WriteAroundObjectDispatch.cc b/src/librbd/cache/WriteAroundObjectDispatch.cc index 06eab207bcd19..3a6d315813d9c 100644 --- a/src/librbd/cache/WriteAroundObjectDispatch.cc +++ b/src/librbd/cache/WriteAroundObjectDispatch.cc @@ -150,7 +150,7 @@ bool WriteAroundObjectDispatch::flush( auto ctx = util::create_async_context_callback(*m_image_ctx, *on_finish); *dispatch_result = io::DISPATCH_RESULT_CONTINUE; - *on_finish = new FunctionContext([this, tid](int r) { + *on_finish = new LambdaContext([this, tid](int r) { handle_in_flight_flush_complete(r, tid); }); @@ -219,7 +219,7 @@ bool WriteAroundObjectDispatch::dispatch_io( auto ctx = util::create_async_context_callback(*m_image_ctx, *on_finish); *dispatch_result = io::DISPATCH_RESULT_CONTINUE; - *on_finish = new FunctionContext( + *on_finish = new LambdaContext( [this, tid, object_no, object_off, object_len](int r) { handle_in_flight_io_complete(r, tid, object_no, object_off, object_len); }); diff --git a/src/librbd/deep_copy/ImageCopyRequest.cc b/src/librbd/deep_copy/ImageCopyRequest.cc index 705ddcb5a4b45..f1e95c1bb4a67 100644 --- a/src/librbd/deep_copy/ImageCopyRequest.cc +++ b/src/librbd/deep_copy/ImageCopyRequest.cc @@ -117,7 +117,7 @@ void ImageCopyRequest::send_next_object_copy() { ++m_current_ops; - Context *ctx = new FunctionContext( + Context *ctx = new LambdaContext( [this, ono](int r) { handle_object_copy(ono, r); }); diff --git a/src/librbd/deep_copy/ObjectCopyRequest.cc b/src/librbd/deep_copy/ObjectCopyRequest.cc index d630c03a83999..42ad774ff4906 100644 --- a/src/librbd/deep_copy/ObjectCopyRequest.cc +++ b/src/librbd/deep_copy/ObjectCopyRequest.cc @@ -391,7 +391,7 @@ void ObjectCopyRequest::send_write_object() { return; } - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_write_object(r); finish_op_ctx->complete(0); }); @@ -468,7 +468,7 @@ void ObjectCopyRequest::send_update_object_map() { return; } - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_update_object_map(r); finish_op_ctx->complete(0); }); @@ -509,7 +509,7 @@ Context *ObjectCopyRequest::start_lock_op(ceph::shared_mutex &owner_lock, int* r) { ceph_assert(ceph_mutex_is_locked(m_dst_image_ctx->owner_lock)); if (m_dst_image_ctx->exclusive_lock == nullptr) { - return new FunctionContext([](int r) {}); + return new LambdaContext([](int r) {}); } return m_dst_image_ctx->exclusive_lock->start_op(r); } diff --git a/src/librbd/deep_copy/SetHeadRequest.cc b/src/librbd/deep_copy/SetHeadRequest.cc index a6c43dfedd6fe..1e056b9580cb0 100644 --- a/src/librbd/deep_copy/SetHeadRequest.cc +++ b/src/librbd/deep_copy/SetHeadRequest.cc @@ -65,7 +65,7 @@ void SetHeadRequest::send_set_size() { return; } - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_set_size(r); finish_op_ctx->complete(0); }); @@ -122,7 +122,7 @@ void SetHeadRequest::send_detach_parent() { return; } - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_detach_parent(r); finish_op_ctx->complete(0); }); @@ -171,7 +171,7 @@ void SetHeadRequest::send_attach_parent() { return; } - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_attach_parent(r); finish_op_ctx->complete(0); }); @@ -204,7 
+204,7 @@ template Context *SetHeadRequest::start_lock_op(int* r) { std::shared_lock owner_locker{m_image_ctx->owner_lock}; if (m_image_ctx->exclusive_lock == nullptr) { - return new FunctionContext([](int r) {}); + return new LambdaContext([](int r) {}); } return m_image_ctx->exclusive_lock->start_op(r); } diff --git a/src/librbd/deep_copy/SnapshotCopyRequest.cc b/src/librbd/deep_copy/SnapshotCopyRequest.cc index e53386b7148e9..0eaaa4778606d 100644 --- a/src/librbd/deep_copy/SnapshotCopyRequest.cc +++ b/src/librbd/deep_copy/SnapshotCopyRequest.cc @@ -182,7 +182,7 @@ void SnapshotCopyRequest::send_snap_unprotect() { return; } - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_snap_unprotect(r); finish_op_ctx->complete(0); }); @@ -279,7 +279,7 @@ void SnapshotCopyRequest::send_snap_remove() { return; } - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_snap_remove(r); finish_op_ctx->complete(0); }); @@ -380,7 +380,7 @@ void SnapshotCopyRequest::send_snap_create() { return; } - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_snap_create(r); finish_op_ctx->complete(0); }); @@ -488,7 +488,7 @@ void SnapshotCopyRequest::send_snap_protect() { return; } - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_snap_protect(r); finish_op_ctx->complete(0); }); @@ -577,7 +577,7 @@ void SnapshotCopyRequest::send_resize_object_map() { auto finish_op_ctx = start_lock_op(m_dst_image_ctx->owner_lock, &r); if (finish_op_ctx != nullptr) { - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_resize_object_map(r); finish_op_ctx->complete(0); }); @@ -625,7 +625,7 @@ template void SnapshotCopyRequest::error(int r) { ldout(m_cct, 20) << "r=" << r << dendl; - m_work_queue->queue(new FunctionContext([this, r](int r1) { finish(r); })); + m_work_queue->queue(new LambdaContext([this, r](int r1) { finish(r); })); } template @@ -662,7 +662,7 @@ template Context *SnapshotCopyRequest::start_lock_op(ceph::shared_mutex &owner_lock, int* r) { ceph_assert(ceph_mutex_is_locked(m_dst_image_ctx->owner_lock)); if (m_dst_image_ctx->exclusive_lock == nullptr) { - return new FunctionContext([](int r) {}); + return new LambdaContext([](int r) {}); } return m_dst_image_ctx->exclusive_lock->start_op(r); } diff --git a/src/librbd/deep_copy/SnapshotCreateRequest.cc b/src/librbd/deep_copy/SnapshotCreateRequest.cc index 5f4ae5121ddf6..23ebb1c666d9e 100644 --- a/src/librbd/deep_copy/SnapshotCreateRequest.cc +++ b/src/librbd/deep_copy/SnapshotCreateRequest.cc @@ -76,7 +76,7 @@ void SnapshotCreateRequest::send_create_snap() { return; } - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_create_snap(r); finish_op_ctx->complete(0); }); @@ -140,7 +140,7 @@ void SnapshotCreateRequest::send_create_object_map() { return; } - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_create_object_map(r); finish_op_ctx->complete(0); }); @@ -168,7 +168,7 @@ template Context *SnapshotCreateRequest::start_lock_op(int* r) { std::shared_lock owner_locker{m_dst_image_ctx->owner_lock}; if 
(m_dst_image_ctx->exclusive_lock == nullptr) { - return new FunctionContext([](int r) {}); + return new LambdaContext([](int r) {}); } return m_dst_image_ctx->exclusive_lock->start_op(r); } diff --git a/src/librbd/image/RemoveRequest.cc b/src/librbd/image/RemoveRequest.cc index f5d9c227fda95..7751fef62cb13 100644 --- a/src/librbd/image/RemoveRequest.cc +++ b/src/librbd/image/RemoveRequest.cc @@ -418,7 +418,7 @@ template void RemoveRequest::remove_v1_image() { ldout(m_cct, 20) << dendl; - Context *ctx = new FunctionContext([this] (int r) { + Context *ctx = new LambdaContext([this] (int r) { r = tmap_rm(m_ioctx, m_image_name); handle_remove_v1_image(r); }); diff --git a/src/librbd/image/ValidatePoolRequest.cc b/src/librbd/image/ValidatePoolRequest.cc index 2214f81b0a09b..9e4af7bf9904d 100644 --- a/src/librbd/image/ValidatePoolRequest.cc +++ b/src/librbd/image/ValidatePoolRequest.cc @@ -97,7 +97,7 @@ void ValidatePoolRequest::create_snapshot() { // allocate a self-managed snapshot id if this a new pool to force // self-managed snapshot mode - auto ctx = new FunctionContext([this](int r) { + auto ctx = new LambdaContext([this](int r) { r = m_io_ctx.selfmanaged_snap_create(&m_snap_id); handle_create_snapshot(r); }); @@ -161,7 +161,7 @@ template void ValidatePoolRequest::remove_snapshot() { ldout(m_cct, 5) << dendl; - auto ctx = new FunctionContext([this](int r) { + auto ctx = new LambdaContext([this](int r) { r = m_io_ctx.selfmanaged_snap_remove(m_snap_id); handle_remove_snapshot(r); }); diff --git a/src/librbd/internal.cc b/src/librbd/internal.cc index 9660a0ef4f0de..e2fa5632e36e1 100644 --- a/src/librbd/internal.cc +++ b/src/librbd/internal.cc @@ -1420,7 +1420,7 @@ int validate_pool(IoCtx &io_ctx, CephContext *cct) { } auto *throttle = m_throttle; - auto *end_op_ctx = new FunctionContext([throttle](int r) { + auto *end_op_ctx = new LambdaContext([throttle](int r) { throttle->end_op(r); }); auto gather_ctx = new C_Gather(m_dest->cct, end_op_ctx); diff --git a/src/librbd/io/ImageRequest.cc b/src/librbd/io/ImageRequest.cc index d835368153f84..dc2acce5df011 100644 --- a/src/librbd/io/ImageRequest.cc +++ b/src/librbd/io/ImageRequest.cc @@ -692,7 +692,7 @@ void ImageFlushRequest::send_request() { auto object_dispatch_spec = ObjectDispatchSpec::create_flush( &image_ctx, OBJECT_DISPATCH_LAYER_NONE, m_flush_source, journal_tid, this->m_trace, ctx); - ctx = new FunctionContext([object_dispatch_spec](int r) { + ctx = new LambdaContext([object_dispatch_spec](int r) { object_dispatch_spec->send(); }); diff --git a/src/librbd/io/ObjectDispatcher.cc b/src/librbd/io/ObjectDispatcher.cc index 513f8adf43dc9..9a74a81340db4 100644 --- a/src/librbd/io/ObjectDispatcher.cc +++ b/src/librbd/io/ObjectDispatcher.cc @@ -249,17 +249,17 @@ void ObjectDispatcher::shut_down_object_dispatch( auto async_op_tracker = object_dispatch_meta.async_op_tracker; Context* ctx = *on_finish; - ctx = new FunctionContext( + ctx = new LambdaContext( [object_dispatch, async_op_tracker, ctx](int r) { delete object_dispatch; delete async_op_tracker; ctx->complete(r); }); - ctx = new FunctionContext([object_dispatch, ctx](int r) { + ctx = new LambdaContext([object_dispatch, ctx](int r) { object_dispatch->shut_down(ctx); }); - *on_finish = new FunctionContext([async_op_tracker, ctx](int r) { + *on_finish = new LambdaContext([async_op_tracker, ctx](int r) { async_op_tracker->wait_for_ops(ctx); }); } diff --git a/src/librbd/io/ObjectRequest.cc b/src/librbd/io/ObjectRequest.cc index a8eb54e0be0df..d3dd77bc21cfd 100644 --- 
a/src/librbd/io/ObjectRequest.cc +++ b/src/librbd/io/ObjectRequest.cc @@ -200,7 +200,7 @@ void ObjectReadRequest::read_object() { std::shared_lock image_locker{image_ctx->image_lock}; if (image_ctx->object_map != nullptr && !image_ctx->object_map->object_may_exist(this->m_object_no)) { - image_ctx->op_work_queue->queue(new FunctionContext([this](int r) { + image_ctx->op_work_queue->queue(new LambdaContext([this](int r) { read_parent(); }), 0); return; diff --git a/src/librbd/io/SimpleSchedulerObjectDispatch.cc b/src/librbd/io/SimpleSchedulerObjectDispatch.cc index 1abb8e3109ec6..6b6a60c9862bf 100644 --- a/src/librbd/io/SimpleSchedulerObjectDispatch.cc +++ b/src/librbd/io/SimpleSchedulerObjectDispatch.cc @@ -145,7 +145,7 @@ void SimpleSchedulerObjectDispatch::ObjectRequests::dispatch_delayed_requests auto offset = it.first; auto &merged_requests = it.second; - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [requests=std::move(merged_requests.requests), latency_stats, latency_stats_lock, start_time=ceph_clock_now()](int r) { if (latency_stats) { @@ -404,7 +404,7 @@ void SimpleSchedulerObjectDispatch::register_in_flight_request( auto dispatch_seq = ++m_dispatch_seq; it->second->set_dispatch_seq(dispatch_seq); - *on_finish = new FunctionContext( + *on_finish = new LambdaContext( [this, object_no, dispatch_seq, start_time, ctx=*on_finish](int r) { ctx->complete(r); @@ -498,7 +498,7 @@ void SimpleSchedulerObjectDispatch::schedule_dispatch_delayed_requests() { object_requests = m_dispatch_queue.front().get(); } - m_timer_task = new FunctionContext( + m_timer_task = new LambdaContext( [this, object_no=object_requests->get_object_no()](int r) { ceph_assert(ceph_mutex_is_locked(*m_timer_lock)); auto cct = m_image_ctx->cct; @@ -506,7 +506,7 @@ void SimpleSchedulerObjectDispatch::schedule_dispatch_delayed_requests() { m_timer_task = nullptr; m_image_ctx->op_work_queue->queue( - new FunctionContext( + new LambdaContext( [this, object_no](int r) { std::lock_guard locker{m_lock}; dispatch_delayed_requests(object_no); diff --git a/src/librbd/journal/ObjectDispatch.cc b/src/librbd/journal/ObjectDispatch.cc index 77728e6a4d28c..fe7f28714431e 100644 --- a/src/librbd/journal/ObjectDispatch.cc +++ b/src/librbd/journal/ObjectDispatch.cc @@ -194,7 +194,7 @@ bool ObjectDispatch::flush( ldout(cct, 20) << dendl; auto ctx = *on_finish; - *on_finish = new FunctionContext( + *on_finish = new LambdaContext( [image_ctx=m_image_ctx, ctx, journal_tid=*journal_tid](int r) { image_ctx->journal->commit_io_event(journal_tid, r); ctx->complete(r); diff --git a/src/librbd/journal/Replay.cc b/src/librbd/journal/Replay.cc index d2d7175455903..cd406df7b01ca 100644 --- a/src/librbd/journal/Replay.cc +++ b/src/librbd/journal/Replay.cc @@ -324,7 +324,7 @@ void Replay::replay_op_ready(uint64_t op_tid, Context *on_resume) { // resume the op state machine once the associated OpFinishEvent // is processed - op_event.on_op_finish_event = new FunctionContext( + op_event.on_op_finish_event = new LambdaContext( [on_resume](int r) { on_resume->complete(r); }); diff --git a/src/librbd/mirror/DisableRequest.cc b/src/librbd/mirror/DisableRequest.cc index ad9a525c9bd2a..929a09685bd77 100644 --- a/src/librbd/mirror/DisableRequest.cc +++ b/src/librbd/mirror/DisableRequest.cc @@ -313,7 +313,7 @@ void DisableRequest::send_remove_snap(const std::string &client_id, Context *ctx = create_context_callback( &DisableRequest::handle_remove_snap, client_id); - ctx = new FunctionContext([this, snap_namespace, snap_name, ctx](int r) { + ctx = 
new LambdaContext([this, snap_namespace, snap_name, ctx](int r) { m_image_ctx->operations->snap_remove(snap_namespace, snap_name.c_str(), ctx); @@ -477,7 +477,7 @@ Context *DisableRequest::create_context_callback( Context*(DisableRequest::*handle)(int*, const std::string &client_id), const std::string &client_id) { - return new FunctionContext([this, handle, client_id](int r) { + return new LambdaContext([this, handle, client_id](int r) { Context *on_finish = (this->*handle)(&r, client_id); if (on_finish != nullptr) { on_finish->complete(r); diff --git a/src/mds/MDBalancer.cc b/src/mds/MDBalancer.cc index 032053fc5745c..9e533c0cf4b86 100644 --- a/src/mds/MDBalancer.cc +++ b/src/mds/MDBalancer.cc @@ -548,13 +548,13 @@ void MDBalancer::queue_split(const CDir *dir, bool fast) // Do the split ASAP: enqueue it in the MDSRank waiters which are // run at the end of dispatching the current request mds->queue_waiter(new MDSInternalContextWrapper(mds, - new FunctionContext(callback))); + new LambdaContext(std::move(callback)))); } else if (is_new) { // Set a timer to really do the split: we don't do it immediately // so that bursts of ops on a directory have a chance to go through // before we freeze it. mds->timer.add_event_after(bal_fragment_interval, - new FunctionContext(callback)); + new LambdaContext(std::move(callback))); } } @@ -616,7 +616,7 @@ void MDBalancer::queue_merge(CDir *dir) dout(20) << __func__ << " enqueued dir " << *dir << dendl; merge_pending.insert(frag); mds->timer.add_event_after(bal_fragment_interval, - new FunctionContext(callback)); + new LambdaContext(std::move(callback))); } else { dout(20) << __func__ << " dir already in queue " << *dir << dendl; } diff --git a/src/mds/MDCache.cc b/src/mds/MDCache.cc index 4e5fabd38b1a9..41d0411352a1b 100644 --- a/src/mds/MDCache.cc +++ b/src/mds/MDCache.cc @@ -620,7 +620,7 @@ void MDCache::open_mydir_frag(MDSContext *c) { open_mydir_inode( new MDSInternalContextWrapper(mds, - new FunctionContext([this, c](int r) { + new LambdaContext([this, c](int r) { if (r < 0) { c->complete(r); return; @@ -2671,7 +2671,7 @@ void MDCache::send_resolves() // I'm survivor: refresh snap cache mds->snapclient->sync( new MDSInternalContextWrapper(mds, - new FunctionContext([this](int r) { + new LambdaContext([this](int r) { maybe_finish_slave_resolve(); }) ) @@ -5327,7 +5327,7 @@ bool MDCache::process_imported_caps() open_file_table.prefetch_inodes()) { open_file_table.wait_for_prefetch( new MDSInternalContextWrapper(mds, - new FunctionContext([this](int r) { + new LambdaContext([this](int r) { ceph_assert(rejoin_gather.count(mds->get_nodeid())); process_imported_caps(); }) @@ -5928,7 +5928,7 @@ bool MDCache::open_undef_inodes_dirfrags() MDSGatherBuilder gather(g_ceph_context, new MDSInternalContextWrapper(mds, - new FunctionContext([this](int r) { + new LambdaContext([this](int r) { if (rejoin_gather.empty()) rejoin_gather_finish(); }) @@ -7904,7 +7904,7 @@ again: MDSContext *fin = nullptr; if (shutdown_exporting_strays.empty()) { fin = new MDSInternalContextWrapper(mds, - new FunctionContext([this](int r) { + new LambdaContext([this](int r) { shutdown_export_strays(); }) ); @@ -12573,7 +12573,7 @@ void MDCache::enqueue_scrub_work(MDRequestRef& mdr) if (header->get_recursive()) { header->get_origin()->get(CInode::PIN_SCRUBQUEUE); fin = new MDSInternalContextWrapper(mds, - new FunctionContext([this, header](int r) { + new LambdaContext([this, header](int r) { recursive_scrub_finish(header); header->get_origin()->put(CInode::PIN_SCRUBQUEUE); }) @@ 
-12585,14 +12585,14 @@ void MDCache::enqueue_scrub_work(MDRequestRef& mdr) // If the scrub did some repair, then flush the journal at the end of // the scrub. Otherwise in the case of e.g. rewriting a backtrace // the on disk state will still look damaged. - auto scrub_finish = new FunctionContext([this, header, fin](int r){ + auto scrub_finish = new LambdaContext([this, header, fin](int r){ if (!header->get_repaired()) { if (fin) fin->complete(r); return; } - auto flush_finish = new FunctionContext([this, fin](int r){ + auto flush_finish = new LambdaContext([this, fin](int r){ dout(4) << "Expiring log segments because scrub did some repairs" << dendl; mds->mdlog->trim_all(); diff --git a/src/mds/MDSDaemon.cc b/src/mds/MDSDaemon.cc index 1795d2d18f3f3..c24baf0868d52 100644 --- a/src/mds/MDSDaemon.cc +++ b/src/mds/MDSDaemon.cc @@ -459,7 +459,7 @@ void MDSDaemon::reset_tick() // schedule tick_event = timer.add_event_after( g_conf()->mds_tick_interval, - new FunctionContext([this](int) { + new LambdaContext([this](int) { ceph_assert(ceph_mutex_is_locked_by_me(mds_lock)); tick(); })); @@ -872,8 +872,8 @@ void MDSDaemon::handle_mds_map(const cref_t &m) if (mds_rank == NULL) { mds_rank = new MDSRankDispatcher(whoami, mds_lock, clog, timer, beacon, mdsmap, messenger, monc, &mgrc, - new FunctionContext([this](int r){respawn();}), - new FunctionContext([this](int r){suicide();})); + new LambdaContext([this](int r){respawn();}), + new LambdaContext([this](int r){suicide();})); dout(10) << __func__ << ": initializing MDS rank " << mds_rank->get_nodeid() << dendl; mds_rank->init(); diff --git a/src/mds/MDSRank.cc b/src/mds/MDSRank.cc index 77696da76fc19..f1dfdd50488d9 100644 --- a/src/mds/MDSRank.cc +++ b/src/mds/MDSRank.cc @@ -81,7 +81,7 @@ private: // previous segments for expiry mdlog->start_new_segment(); - Context *ctx = new FunctionContext([this](int r) { + Context *ctx = new LambdaContext([this](int r) { handle_flush_mdlog(r); }); @@ -106,7 +106,7 @@ private: void clear_mdlog() { dout(20) << __func__ << dendl; - Context *ctx = new FunctionContext([this](int r) { + Context *ctx = new LambdaContext([this](int r) { handle_clear_mdlog(r); }); @@ -163,7 +163,7 @@ private: return; } - Context *ctx = new FunctionContext([this](int r) { + Context *ctx = new LambdaContext([this](int r) { handle_expire_segments(r); }); expiry_gather->set_finisher(new MDSInternalContextWrapper(mds, ctx)); @@ -181,7 +181,7 @@ private: void trim_segments() { dout(20) << __func__ << dendl; - Context *ctx = new C_OnFinisher(new FunctionContext([this](int _) { + Context *ctx = new C_OnFinisher(new LambdaContext([this](int) { std::lock_guard locker(mds->mds_lock); trim_expired_segments(); }), mds->finisher); @@ -206,7 +206,7 @@ private: void write_journal_head() { dout(20) << __func__ << dendl; - Context *ctx = new FunctionContext([this](int r) { + Context *ctx = new LambdaContext([this](int r) { std::lock_guard locker(mds->mds_lock); handle_write_head(r); }); @@ -283,7 +283,7 @@ private: return; } - timer_task = new FunctionContext([this](int _) { + timer_task = new LambdaContext([this](int) { timer_task = nullptr; complete(-ETIMEDOUT); }); @@ -337,7 +337,7 @@ private: caps_recalled += count; if ((throttled || count > 0) && (recall_timeout == 0 || duration < recall_timeout)) { C_ContextTimeout *ctx = new C_ContextTimeout( - mds, 1, new FunctionContext([this](int r) { + mds, 1, new LambdaContext([this](int r) { recall_client_state(); })); ctx->start_timer(); @@ -356,7 +356,7 @@ private: } else { uint64_t remaining = 
(recall_timeout == 0 ? 0 : recall_timeout-duration); C_ContextTimeout *ctx = new C_ContextTimeout( - mds, remaining, new FunctionContext([this](int r) { + mds, remaining, new LambdaContext([this](int r) { handle_recall_client_state(r); })); @@ -384,7 +384,7 @@ private: void flush_journal() { dout(20) << __func__ << dendl; - Context *ctx = new FunctionContext([this](int r) { + Context *ctx = new LambdaContext([this](int r) { handle_flush_journal(r); }); @@ -415,7 +415,7 @@ private: auto [throttled, count] = do_trim(); if (throttled && count > 0) { - auto timer = new FunctionContext([this](int _) { + auto timer = new LambdaContext([this](int) { trim_cache(); }); mds->timer.add_event_after(1.0, timer); @@ -501,7 +501,7 @@ MDSRank::MDSRank( cluster_degraded(false), stopping(false), purge_queue(g_ceph_context, whoami_, mdsmap_->get_metadata_pool(), objecter, - new FunctionContext([this](int r) { + new LambdaContext([this](int r) { std::lock_guard l(mds_lock); handle_write_error(r); } @@ -3414,10 +3414,10 @@ bool MDSRank::evict_client(int64_t session_id, auto apply_blacklist = [this, cmd](std::function fn){ ceph_assert(ceph_mutex_is_locked_by_me(mds_lock)); - Context *on_blacklist_done = new FunctionContext([this, fn](int r) { + Context *on_blacklist_done = new LambdaContext([this, fn](int r) { objecter->wait_for_latest_osdmap( new C_OnFinisher( - new FunctionContext([this, fn](int r) { + new LambdaContext([this, fn](int r) { std::lock_guard l(mds_lock); auto epoch = objecter->with_osdmap([](const OSDMap &o){ return o.get_epoch(); @@ -3478,7 +3478,7 @@ void MDSRank::bcast_mds_map() } Context *MDSRank::create_async_exec_context(C_ExecAndReply *ctx) { - return new C_OnFinisher(new FunctionContext([ctx](int _) { + return new C_OnFinisher(new LambdaContext([ctx](int) { ctx->exec(); }), finisher); } @@ -3702,7 +3702,7 @@ void MDSRankDispatcher::handle_conf_change(const ConfigProxy& conf, const std::s update_log_config(); } - finisher->queue(new FunctionContext([this, changed](int r) { + finisher->queue(new LambdaContext([this, changed](int) { std::scoped_lock lock(mds_lock); if (changed.count("mds_log_pause") && !g_conf()->mds_log_pause) { @@ -3727,7 +3727,7 @@ void MDSRank::schedule_update_timer_task() { dout(20) << __func__ << dendl; timer.add_event_after(g_conf().get_val("mds_task_status_update_interval"), - new FunctionContext([this](int _) { + new LambdaContext([this](int) { send_task_status(); })); } diff --git a/src/mds/OpenFileTable.cc b/src/mds/OpenFileTable.cc index 57f6d577e2c27..0f354ff5a3b93 100644 --- a/src/mds/OpenFileTable.cc +++ b/src/mds/OpenFileTable.cc @@ -1074,7 +1074,7 @@ void OpenFileTable::_prefetch_dirfrags() if (gather.has_subs()) { gather.set_finisher( new MDSInternalContextWrapper(mds, - new FunctionContext(finish_func))); + new LambdaContext(std::move(finish_func)))); gather.activate(); } else { finish_func(0); @@ -1144,7 +1144,7 @@ bool OpenFileTable::prefetch_inodes() if (!load_done) { wait_for_load( new MDSInternalContextWrapper(mds, - new FunctionContext([this](int r) { + new LambdaContext([this](int r) { _prefetch_inodes(); }) ) diff --git a/src/mds/PurgeQueue.cc b/src/mds/PurgeQueue.cc index a7d5c7c198100..48710d1182311 100644 --- a/src/mds/PurgeQueue.cc +++ b/src/mds/PurgeQueue.cc @@ -161,7 +161,7 @@ void PurgeQueue::activate() if (in_flight.empty()) { dout(4) << "start work (by drain)" << dendl; - finisher.queue(new FunctionContext([this](int r) { + finisher.queue(new LambdaContext([this](int r) { std::lock_guard l(lock); _consume(); })); @@ -186,7 +186,7 @@ void 
PurgeQueue::open(Context *completion) if (completion) waiting_for_recovery.push_back(completion); - journaler.recover(new FunctionContext([this](int r){ + journaler.recover(new LambdaContext([this](int r){ if (r == -ENOENT) { dout(1) << "Purge Queue not found, assuming this is an upgrade and " "creating it." << dendl; @@ -237,7 +237,7 @@ void PurgeQueue::_recover() if (!journaler.is_readable() && !journaler.get_error() && journaler.get_read_pos() < journaler.get_write_pos()) { - journaler.wait_for_readable(new FunctionContext([this](int r) { + journaler.wait_for_readable(new LambdaContext([this](int r) { std::lock_guard l(lock); _recover(); })); @@ -279,7 +279,7 @@ void PurgeQueue::create(Context *fin) layout.pool_id = metadata_pool; journaler.set_writeable(); journaler.create(&layout, JOURNAL_FORMAT_RESILIENT); - journaler.write_head(new FunctionContext([this](int r) { + journaler.write_head(new LambdaContext([this](int r) { std::lock_guard l(lock); if (r) { _go_readonly(r); @@ -322,7 +322,7 @@ void PurgeQueue::push(const PurgeItem &pi, Context *completion) // we should flush in order to allow MDCache to drop its strays rather // than having them wait for purgequeue to progress. if (!delayed_flush) { - delayed_flush = new FunctionContext([this](int r){ + delayed_flush = new LambdaContext([this](int r){ delayed_flush = nullptr; journaler.flush(); }); @@ -435,7 +435,7 @@ bool PurgeQueue::_consume() // Because we are the writer and the reader of the journal // via the same Journaler instance, we never need to reread_head if (!journaler.have_waiter()) { - journaler.wait_for_readable(new FunctionContext([this](int r) { + journaler.wait_for_readable(new LambdaContext([this](int r) { std::lock_guard l(lock); if (r == 0) { _consume(); @@ -558,7 +558,7 @@ void PurgeQueue::_execute_item( ceph_assert(gather.has_subs()); gather.set_finisher(new C_OnFinisher( - new FunctionContext([this, expire_to](int r){ + new LambdaContext([this, expire_to](int r){ std::lock_guard l(lock); _execute_item_complete(expire_to); @@ -685,7 +685,7 @@ void PurgeQueue::handle_conf_change(const std::set& changed, const // might need to kick off consume. 
dout(4) << "maybe start work again (max_purge_files=" << g_conf()->mds_max_purge_files << dendl; - finisher.queue(new FunctionContext([this](int r){ + finisher.queue(new LambdaContext([this](int r){ std::lock_guard l(lock); _consume(); })); diff --git a/src/mds/Server.cc b/src/mds/Server.cc index ea1a0269f0f3d..e8649b86de173 100644 --- a/src/mds/Server.cc +++ b/src/mds/Server.cc @@ -420,7 +420,7 @@ void Server::finish_reclaim_session(Session *session, const ref_tget_client().v; - send_reply = new FunctionContext([this, session_id, reply](int r) { + send_reply = new LambdaContext([this, session_id, reply](int r) { assert(ceph_mutex_is_locked_by_me(mds->mds_lock)); Session *session = mds->sessionmap.get_session(entity_name_t::CLIENT(session_id)); if (!session) { @@ -632,7 +632,7 @@ void Server::handle_client_session(const cref_t &m) pv = mds->sessionmap.mark_projected(session); sseq = mds->sessionmap.set_state(session, Session::STATE_OPENING); mds->sessionmap.touch_session(session); - auto fin = new FunctionContext([log_session_status = std::move(log_session_status)](int r){ + auto fin = new LambdaContext([log_session_status = std::move(log_session_status)](int r){ ceph_assert(r == 0); log_session_status("ACCEPTED", ""); }); @@ -1577,7 +1577,7 @@ void Server::reconnect_tick() if (gather.has_subs()) { dout(1) << "reconnect will complete once clients are evicted" << dendl; - gather.set_finisher(new MDSInternalContextWrapper(mds, new FunctionContext( + gather.set_finisher(new MDSInternalContextWrapper(mds, new LambdaContext( [this](int r){reconnect_gather_finish();}))); gather.activate(); reconnect_evicting = true; diff --git a/src/mgr/ActivePyModules.cc b/src/mgr/ActivePyModules.cc index 40250a549a5f6..d9f28761a450b 100644 --- a/src/mgr/ActivePyModules.cc +++ b/src/mgr/ActivePyModules.cc @@ -411,7 +411,7 @@ void ActivePyModules::start_one(PyModuleRef py_module) // Send all python calls down a Finisher to avoid blocking // C++ code, and avoid any potential lock cycles. - finisher.queue(new FunctionContext([this, active_module, name](int) { + finisher.queue(new LambdaContext([this, active_module, name](int) { int r = active_module->load(this); if (r != 0) { derr << "Failed to run module in active mode ('" << name << "')" @@ -467,7 +467,7 @@ void ActivePyModules::notify_all(const std::string ¬ify_type, auto module = i.second.get(); // Send all python calls down a Finisher to avoid blocking // C++ code, and avoid any potential lock cycles. - finisher.queue(new FunctionContext([module, notify_type, notify_id](int r){ + finisher.queue(new LambdaContext([module, notify_type, notify_id](int r){ module->notify(notify_type, notify_id); })); } @@ -486,7 +486,7 @@ void ActivePyModules::notify_all(const LogEntry &log_entry) // Note intentional use of non-reference lambda binding on // log_entry: we take a copy because caller's instance is // probably ephemeral. - finisher.queue(new FunctionContext([module, log_entry](int r){ + finisher.queue(new LambdaContext([module, log_entry](int r){ module->notify_clog(log_entry); })); } @@ -981,7 +981,7 @@ void ActivePyModules::config_notify() auto module = i.second.get(); // Send all python calls down a Finisher to avoid blocking // C++ code, and avoid any potential lock cycles. 
- finisher.queue(new FunctionContext([module](int r){ + finisher.queue(new LambdaContext([module](int r){ module->config_notify(); })); } diff --git a/src/mgr/BaseMgrModule.cc b/src/mgr/BaseMgrModule.cc index 812e23d9fa67c..fb5d7efe6aa9e 100644 --- a/src/mgr/BaseMgrModule.cc +++ b/src/mgr/BaseMgrModule.cc @@ -152,9 +152,9 @@ ceph_send_command(BaseMgrModule *self, PyObject *args) // TODO: enhance MCommand interface so that it returns // latest cluster map versions on completion, and callers // can wait for those. - auto c = new FunctionContext([command_c, self](int command_r){ + auto c = new LambdaContext([command_c, self](int command_r){ self->py_modules->get_objecter().wait_for_latest_osdmap( - new FunctionContext([command_c, command_r](int wait_r){ + new LambdaContext([command_c, command_r](int wait_r){ command_c->complete(command_r); }) ); diff --git a/src/mgr/DaemonServer.cc b/src/mgr/DaemonServer.cc index efea20d093470..ace4366dfdc23 100644 --- a/src/mgr/DaemonServer.cc +++ b/src/mgr/DaemonServer.cc @@ -331,7 +331,7 @@ void DaemonServer::schedule_tick_locked(double delay_sec) return; tick_event = timer.add_event_after(delay_sec, - new FunctionContext([this](int r) { + new LambdaContext([this](int r) { tick(); })); } @@ -348,7 +348,7 @@ void DaemonServer::handle_osd_perf_metric_query_updated() // Send a fresh MMgrConfigure to all clients, so that they can follow // the new policy for transmitting stats - finisher.queue(new FunctionContext([this](int r) { + finisher.queue(new LambdaContext([this](int r) { std::lock_guard l(lock); for (auto &c : daemon_connections) { if (c->peer_is_osd()) { @@ -2245,7 +2245,7 @@ bool DaemonServer::_handle_command( } dout(10) << "passing through " << cmdctx->cmdmap.size() << dendl; - finisher.queue(new FunctionContext([this, cmdctx, handler_name, prefix](int r_) { + finisher.queue(new LambdaContext([this, cmdctx, handler_name, prefix](int r_) { std::stringstream ss; // Validate that the module is enabled @@ -2810,7 +2810,7 @@ void DaemonServer::handle_conf_change(const ConfigProxy& conf, << daemon_connections.size() << " clients" << dendl; // Send a fresh MMgrConfigure to all clients, so that they can follow // the new policy for transmitting stats - finisher.queue(new FunctionContext([this](int r) { + finisher.queue(new LambdaContext([this](int r) { std::lock_guard l(lock); for (auto &c : daemon_connections) { _send_configure(c); diff --git a/src/mgr/Mgr.cc b/src/mgr/Mgr.cc index 9620ca769bfde..f3730d813d279 100644 --- a/src/mgr/Mgr.cc +++ b/src/mgr/Mgr.cc @@ -158,7 +158,7 @@ void Mgr::background_init(Context *completion) finisher.start(); - finisher.queue(new FunctionContext([this, completion](int r){ + finisher.queue(new LambdaContext([this, completion](int r){ init(); completion->complete(0); })); @@ -400,7 +400,7 @@ void Mgr::load_all_metadata() void Mgr::shutdown() { - finisher.queue(new FunctionContext([&](int) { + finisher.queue(new LambdaContext([&](int) { { std::lock_guard l(lock); // First stop the server so that we're not taking any more incoming diff --git a/src/mgr/MgrClient.cc b/src/mgr/MgrClient.cc index 1f6d0cf6f5cac..a1075952bf9c2 100644 --- a/src/mgr/MgrClient.cc +++ b/src/mgr/MgrClient.cc @@ -150,7 +150,7 @@ void MgrClient::reconnect() if (!connect_retry_callback) { connect_retry_callback = timer.add_event_at( when, - new FunctionContext([this](int r){ + new LambdaContext([this](int r){ connect_retry_callback = nullptr; reconnect(); })); @@ -258,7 +258,7 @@ void MgrClient::_send_stats() if (stats_period != 0) { report_callback = 
timer.add_event_after( stats_period, - new FunctionContext([this](int) { + new LambdaContext([this](int) { _send_stats(); })); } diff --git a/src/mgr/MgrStandby.cc b/src/mgr/MgrStandby.cc index 35bbbfb46166f..3bcb68431c72d 100644 --- a/src/mgr/MgrStandby.cc +++ b/src/mgr/MgrStandby.cc @@ -253,7 +253,7 @@ void MgrStandby::tick() timer.add_event_after( g_conf().get_val("mgr_tick_period").count(), - new FunctionContext([this](int r){ + new LambdaContext([this](int r){ tick(); } )); @@ -269,7 +269,7 @@ void MgrStandby::handle_signal(int signum) void MgrStandby::shutdown() { - finisher.queue(new FunctionContext([&](int) { + finisher.queue(new LambdaContext([&](int) { std::lock_guard l(lock); dout(4) << "Shutting down" << dendl; @@ -395,7 +395,7 @@ void MgrStandby::handle_mgr_map(ref_t mmap) active_mgr.reset(new Mgr(&monc, map, &py_module_registry, client_messenger.get(), &objecter, &client, clog, audit_clog)); - active_mgr->background_init(new FunctionContext( + active_mgr->background_init(new LambdaContext( [this](int r){ // Advertise our active-ness ASAP instead of waiting for // next tick. diff --git a/src/mgr/StandbyPyModules.cc b/src/mgr/StandbyPyModules.cc index 5735b415e60cd..cff60aded2ab6 100644 --- a/src/mgr/StandbyPyModules.cc +++ b/src/mgr/StandbyPyModules.cc @@ -87,7 +87,7 @@ void StandbyPyModules::start_one(PyModuleRef py_module) // Send all python calls down a Finisher to avoid blocking // C++ code, and avoid any potential lock cycles. - finisher.queue(new FunctionContext([this, standby_module, name](int) { + finisher.queue(new LambdaContext([this, standby_module, name](int) { int r = standby_module->load(); if (r != 0) { derr << "Failed to run module in standby mode ('" << name << "')" diff --git a/src/mon/Elector.cc b/src/mon/Elector.cc index 403fbca0fcc3b..fcd59d688939f 100644 --- a/src/mon/Elector.cc +++ b/src/mon/Elector.cc @@ -151,9 +151,9 @@ void Elector::reset_timer(double plus) */ expire_event = mon->timer.add_event_after( g_conf()->mon_election_timeout + plus, - new C_MonContext(mon, [this](int) { + new C_MonContext{mon, [this](int) { logic.end_election_period(); - })); + }}); } diff --git a/src/mon/MDSMonitor.cc b/src/mon/MDSMonitor.cc index 2cad13dac5362..9a6af923d1eb6 100644 --- a/src/mon/MDSMonitor.cc +++ b/src/mon/MDSMonitor.cc @@ -640,7 +640,7 @@ bool MDSMonitor::prepare_beacon(MonOpRequestRef op) * know which FS it was part of. Nor does this matter. Sending an empty * MDSMap is sufficient for getting the MDS to respawn. 
*/ - wait_for_finished_proposal(op, new FunctionContext([op, this](int r){ + wait_for_finished_proposal(op, new LambdaContext([op, this](int r){ if (r >= 0) { const auto& fsmap = get_fsmap(); MDSMap null_map; @@ -778,7 +778,7 @@ bool MDSMonitor::prepare_beacon(MonOpRequestRef op) dout(5) << "prepare_beacon pending map now:" << dendl; print_map(pending); - wait_for_finished_proposal(op, new FunctionContext([op, this](int r){ + wait_for_finished_proposal(op, new LambdaContext([op, this](int r){ if (r >= 0) _updated(op); // success else if (r == -ECANCELED) { diff --git a/src/mon/MgrMonitor.cc b/src/mon/MgrMonitor.cc index 67f6e51030708..502a7f6cac29d 100644 --- a/src/mon/MgrMonitor.cc +++ b/src/mon/MgrMonitor.cc @@ -291,9 +291,9 @@ void MgrMonitor::post_paxos_update() send_digests(); } else { cancel_timer(); - wait_for_active_ctx(new C_MonContext(mon, [this](int) { + wait_for_active_ctx(new C_MonContext{mon, [this](int) { send_digests(); - })); + }}); } } } @@ -651,9 +651,9 @@ void MgrMonitor::send_digests() timer: digest_event = mon->timer.add_event_after( g_conf().get_val("mon_mgr_digest_period"), - new C_MonContext(mon, [this](int) { + new C_MonContext{mon, [this](int) { send_digests(); - })); + }}); } void MgrMonitor::cancel_timer() diff --git a/src/mon/MonClient.cc b/src/mon/MonClient.cc index 11915ff237ca8..ce1860a9d887b 100644 --- a/src/mon/MonClient.cc +++ b/src/mon/MonClient.cc @@ -415,7 +415,7 @@ void MonClient::handle_monmap(MMonMap *m) void MonClient::handle_config(MConfig *m) { ldout(cct,10) << __func__ << " " << *m << dendl; - finisher.queue(new FunctionContext([this, m](int r) { + finisher.queue(new LambdaContext([this, m](int r) { cct->_conf.set_mon_vals(cct, m->config, config_cb); if (config_notify_cb) { config_notify_cb(); @@ -895,7 +895,7 @@ void MonClient::_un_backoff() void MonClient::schedule_tick() { - auto do_tick = make_lambda_context([this]() { tick(); }); + auto do_tick = make_lambda_context([this](int) { tick(); }); if (_hunting()) { const auto hunt_interval = (cct->_conf->mon_client_hunt_interval * reopen_interval_multiplier); diff --git a/src/mon/Monitor.cc b/src/mon/Monitor.cc index 892b2cde4a571..8d37cbd9161a8 100644 --- a/src/mon/Monitor.cc +++ b/src/mon/Monitor.cc @@ -120,14 +120,6 @@ MonCommand mon_commands[] = { #undef COMMAND #undef COMMAND_WITH_FLAG - - -void C_MonContext::finish(int r) { - if (mon->is_shutdown()) - return; - FunctionContext::finish(r); -} - Monitor::Monitor(CephContext* cct_, string nm, MonitorDBStore *s, Messenger *m, Messenger *mgr_m, MonMap *map) : Dispatcher(cct_), @@ -549,18 +541,18 @@ void Monitor::handle_conf_change(const ConfigProxy& conf, if (changed.count("mon_health_to_clog") || changed.count("mon_health_to_clog_interval") || changed.count("mon_health_to_clog_tick_interval")) { - finisher.queue(new C_MonContext(this, [this, changed](int) { + finisher.queue(new C_MonContext{this, [this, changed](int) { std::lock_guard l{lock}; health_to_clog_update_conf(changed); - })); + }}); } if (changed.count("mon_scrub_interval")) { int scrub_interval = conf->mon_scrub_interval; - finisher.queue(new C_MonContext(this, [this, scrub_interval](int) { + finisher.queue(new C_MonContext{this, [this, scrub_interval](int) { std::lock_guard l{lock}; scrub_update_interval(scrub_interval); - })); + }}); } } @@ -1449,9 +1441,9 @@ void Monitor::sync_reset_timeout() timer.cancel_event(sync_timeout_event); sync_timeout_event = timer.add_event_after( g_conf()->mon_sync_timeout, - new C_MonContext(this, [this](int) { + new C_MonContext{this, [this](int) { 
sync_timeout(); - })); + }}); } void Monitor::sync_finish(version_t last_committed) @@ -1791,9 +1783,9 @@ void Monitor::cancel_probe_timeout() void Monitor::reset_probe_timeout() { cancel_probe_timeout(); - probe_timeout_event = new C_MonContext(this, [this](int r) { + probe_timeout_event = new C_MonContext{this, [this](int r) { probe_timeout(r); - }); + }}; double t = g_conf()->mon_probe_timeout; if (timer.add_event_after(t, probe_timeout_event)) { dout(10) << "reset_probe_timeout " << probe_timeout_event @@ -2202,16 +2194,16 @@ void Monitor::win_election(epoch_t epoch, const set& active, uint64_t featu // Freshen the health status before doing health_to_clog in case // our just-completed election changed the health - healthmon()->wait_for_active_ctx(new FunctionContext([this](int r){ + healthmon()->wait_for_active_ctx(new LambdaContext([this](int r){ dout(20) << "healthmon now active" << dendl; healthmon()->tick(); if (healthmon()->is_proposing()) { dout(20) << __func__ << " healthmon proposing, waiting" << dendl; - healthmon()->wait_for_finished_proposal(nullptr, new C_MonContext(this, + healthmon()->wait_for_finished_proposal(nullptr, new C_MonContext{this, [this](int r){ ceph_assert(ceph_mutex_is_locked_by_me(lock)); do_health_to_clog_interval(); - })); + }}); } else { do_health_to_clog_interval(); @@ -2631,11 +2623,11 @@ void Monitor::health_tick_start() health_tick_stop(); health_tick_event = timer.add_event_after( cct->_conf->mon_health_to_clog_tick_interval, - new C_MonContext(this, [this](int r) { + new C_MonContext{this, [this](int r) { if (r < 0) return; health_tick_start(); - })); + }}); } void Monitor::health_tick_stop() @@ -2677,11 +2669,11 @@ void Monitor::health_interval_start() health_interval_stop(); auto next = health_interval_calc_next_update(); - health_interval_event = new C_MonContext(this, [this](int r) { + health_interval_event = new C_MonContext{this, [this](int r) { if (r < 0) return; do_health_to_clog_interval(); - }); + }}; if (!timer.add_event_at(next, health_interval_event)) { health_interval_event = nullptr; } @@ -4771,9 +4763,9 @@ void Monitor::timecheck_reset_event() timecheck_event = timer.add_event_after( delay, - new C_MonContext(this, [this](int) { + new C_MonContext{this, [this](int) { timecheck_start_round(); - })); + }}); } void Monitor::timecheck_check_skews() @@ -5628,9 +5620,9 @@ void Monitor::scrub_event_start() scrub_event = timer.add_event_after( cct->_conf->mon_scrub_interval, - new C_MonContext(this, [this](int) { + new C_MonContext{this, [this](int) { scrub_start(); - })); + }}); } void Monitor::scrub_event_cancel() @@ -5656,17 +5648,17 @@ void Monitor::scrub_reset_timeout() scrub_cancel_timeout(); scrub_timeout_event = timer.add_event_after( g_conf()->mon_scrub_timeout, - new C_MonContext(this, [this](int) { + new C_MonContext{this, [this](int) { scrub_timeout(); - })); + }}); } /************ TICK ***************/ void Monitor::new_tick() { - timer.add_event_after(g_conf()->mon_tick_interval, new C_MonContext(this, [this](int) { + timer.add_event_after(g_conf()->mon_tick_interval, new C_MonContext{this, [this](int) { tick(); - })); + }}); } void Monitor::tick() diff --git a/src/mon/Monitor.h b/src/mon/Monitor.h index ae79efc7a7ca6..aa5f0612be983 100644 --- a/src/mon/Monitor.h +++ b/src/mon/Monitor.h @@ -104,14 +104,6 @@ class AdminSocketHook; #define COMPAT_SET_LOC "feature_set" -class C_MonContext final : public FunctionContext { - const Monitor *mon; -public: - explicit C_MonContext(Monitor *m, boost::function&& callback) - : 
FunctionContext(std::move(callback)), mon(m) {} - void finish(int r) override; -}; - class Monitor : public Dispatcher, public AuthClient, public AuthServer, @@ -1042,5 +1034,32 @@ public: // make sure you add your feature to Monitor::get_supported_features +/* Callers use: + * + * new C_MonContext{...} + * + * instead of + * + * new C_MonContext(...) + * + * because of gcc bug [1]. + * + * [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85883 + */ +template +class C_MonContext : public LambdaContext { +public: + C_MonContext(const Monitor* m, T&& f) : + LambdaContext(std::forward(f)), + mon(m) + {} + void finish(int r) override { + if (mon->is_shutdown()) + return; + LambdaContext::finish(r); + } +private: + const Monitor* mon; +}; #endif diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc index d787e58977cac..9b08b0bba7b43 100644 --- a/src/mon/OSDMonitor.cc +++ b/src/mon/OSDMonitor.cc @@ -2992,7 +2992,7 @@ bool OSDMonitor::prepare_mark_me_dead(MonOpRequestRef op) pending_inc.new_xinfo[target_osd].dead_epoch = m->get_epoch(); wait_for_finished_proposal( op, - new FunctionContext( + new LambdaContext( [op, this] (int r) { if (r >= 0) { mon->no_reply(op); // ignore on success diff --git a/src/mon/Paxos.cc b/src/mon/Paxos.cc index 2859ff2ce4786..a6bf59b853fc5 100644 --- a/src/mon/Paxos.cc +++ b/src/mon/Paxos.cc @@ -203,11 +203,11 @@ void Paxos::collect(version_t oldpn) collect_timeout_event = mon->timer.add_event_after( g_conf()->mon_accept_timeout_factor * g_conf()->mon_lease, - new C_MonContext(mon, [this](int r) { + new C_MonContext{mon, [this](int r) { if (r == -ECANCELED) return; collect_timeout(); - })); + }}); } @@ -694,11 +694,11 @@ void Paxos::begin(bufferlist& v) // set timeout event accept_timeout_event = mon->timer.add_event_after( g_conf()->mon_accept_timeout_factor * g_conf()->mon_lease, - new C_MonContext(mon, [this](int r) { + new C_MonContext{mon, [this](int r) { if (r == -ECANCELED) return; accept_timeout(); - })); + }}); } // peon @@ -995,11 +995,11 @@ void Paxos::extend_lease() if (!lease_ack_timeout_event) { lease_ack_timeout_event = mon->timer.add_event_after( g_conf()->mon_lease_ack_timeout_factor * g_conf()->mon_lease, - new C_MonContext(mon, [this](int r) { + new C_MonContext{mon, [this](int r) { if (r == -ECANCELED) return; lease_ack_timeout(); - })); + }}); } // set renew event @@ -1008,11 +1008,11 @@ void Paxos::extend_lease() at += ceph::make_timespan(g_conf()->mon_lease_renew_interval_factor * g_conf()->mon_lease); lease_renew_event = mon->timer.add_event_at( - at, new C_MonContext(mon, [this](int r) { + at, new C_MonContext{mon, [this](int r) { if (r == -ECANCELED) return; lease_renew_timeout(); - })); + }}); } void Paxos::warn_on_future_time(utime_t t, entity_name_t from) @@ -1198,11 +1198,11 @@ void Paxos::reset_lease_timeout() mon->timer.cancel_event(lease_timeout_event); lease_timeout_event = mon->timer.add_event_after( g_conf()->mon_lease_ack_timeout_factor * g_conf()->mon_lease, - new C_MonContext(mon, [this](int r) { + new C_MonContext{mon, [this](int r) { if (r == -ECANCELED) return; lease_timeout(); - })); + }}); } void Paxos::lease_timeout() diff --git a/src/mon/PaxosService.cc b/src/mon/PaxosService.cc index c71d026ffba72..1aea42e4841ac 100644 --- a/src/mon/PaxosService.cc +++ b/src/mon/PaxosService.cc @@ -117,7 +117,7 @@ bool PaxosService::dispatch(MonOpRequestRef op) * Callback class used to propose the pending value once the proposal_timer * fires up. 
*/ - auto do_propose = new C_MonContext(mon, [this](int r) { + auto do_propose = new C_MonContext{mon, [this](int r) { proposal_timer = 0; if (r >= 0) { propose_pending(); @@ -126,7 +126,7 @@ bool PaxosService::dispatch(MonOpRequestRef op) } else { ceph_abort_msg("bad return value for proposal_timer"); } - }); + }}; dout(10) << " setting proposal_timer " << do_propose << " with delay of " << delay << dendl; proposal_timer = mon->timer.add_event_after(delay, do_propose); diff --git a/src/mon/QuorumService.h b/src/mon/QuorumService.h index eea7f3e371c9c..5559b15c26385 100644 --- a/src/mon/QuorumService.h +++ b/src/mon/QuorumService.h @@ -59,11 +59,11 @@ protected: if (tick_period <= 0) return; - tick_event = new C_MonContext(mon, [this](int r) { + tick_event = new C_MonContext{mon, [this](int r) { if (r < 0) return; tick(); - }); + }}; mon->timer.add_event_after(tick_period, tick_event); } diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc index af29386c900e1..2d8760e0093f0 100644 --- a/src/osd/OSD.cc +++ b/src/osd/OSD.cc @@ -5858,7 +5858,7 @@ void OSD::_preboot(epoch_t oldest, epoch_t newest) // this thread might be required for splitting and merging PGs to // make progress. boot_finisher.queue( - new FunctionContext( + new LambdaContext( [this](int r) { std::unique_lock l(osd_lock); if (is_preboot()) { @@ -9628,7 +9628,7 @@ void OSD::do_recovery( std::lock_guard l(service.sleep_lock); if (recovery_sleep > 0 && service.recovery_needs_sleep) { PGRef pgref(pg); - auto recovery_requeue_callback = new FunctionContext([this, pgref, queued, reserved_pushes](int r) { + auto recovery_requeue_callback = new LambdaContext([this, pgref, queued, reserved_pushes](int r) { dout(20) << "do_recovery wake up at " << ceph_clock_now() << ", re-queuing recovery" << dendl; diff --git a/src/osd/PG.cc b/src/osd/PG.cc index af2df32d7115e..3d02bab05808c 100644 --- a/src/osd/PG.cc +++ b/src/osd/PG.cc @@ -2459,7 +2459,7 @@ void PG::scrub(epoch_t queued, ThreadPool::TPHandle &handle) spg_t pgid = get_pgid(); int state = scrubber.state; auto scrub_requeue_callback = - new FunctionContext([osds, pgid, state](int r) { + new LambdaContext([osds, pgid, state](int r) { PGRef pg = osds->osd->lookup_lock_pg(pgid); if (pg == nullptr) { lgeneric_dout(osds->osd->cct, 20) @@ -3743,7 +3743,7 @@ void PG::do_delete_work(ObjectStore::Transaction &t) if (osd_delete_sleep > 0 && delete_needs_sleep) { epoch_t e = get_osdmap()->get_epoch(); PGRef pgref(this); - auto delete_requeue_callback = new FunctionContext([this, pgref, e](int r) { + auto delete_requeue_callback = new LambdaContext([this, pgref, e](int r) { dout(20) << __func__ << " wake up at " << ceph_clock_now() << ", re-queuing delete" << dendl; diff --git a/src/osd/PGBackend.cc b/src/osd/PGBackend.cc index 361293833c07c..0f4f628002519 100644 --- a/src/osd/PGBackend.cc +++ b/src/osd/PGBackend.cc @@ -136,7 +136,7 @@ void PGBackend::handle_recovery_delete(OpRequestRef op) reply->objects = m->objects; ConnectionRef conn = m->get_connection(); - gather.set_finisher(new FunctionContext( + gather.set_finisher(new LambdaContext( [=](int r) { if (r != -EAGAIN) { get_parent()->send_message_osd_cluster(reply, conn.get()); diff --git a/src/osd/PrimaryLogPG.cc b/src/osd/PrimaryLogPG.cc index 1c9286920b480..fa58df8231e57 100644 --- a/src/osd/PrimaryLogPG.cc +++ b/src/osd/PrimaryLogPG.cc @@ -11275,7 +11275,7 @@ int PrimaryLogPG::recover_missing( ceph_assert(!recovering.count(soid)); recovering.insert(make_pair(soid, ObjectContextRef())); epoch_t cur_epoch = get_osdmap_epoch(); - 
remove_missing_object(soid, v, new FunctionContext( + remove_missing_object(soid, v, new LambdaContext( [=](int) { std::scoped_lock locker{*this}; if (!pg_has_reset_since(cur_epoch)) { @@ -11358,7 +11358,7 @@ void PrimaryLogPG::remove_missing_object(const hobject_t &soid, recovery_info.version = v; epoch_t cur_epoch = get_osdmap_epoch(); - t.register_on_complete(new FunctionContext( + t.register_on_complete(new LambdaContext( [=](int) { std::unique_lock locker{*this}; if (!pg_has_reset_since(cur_epoch)) { @@ -11529,7 +11529,7 @@ void PrimaryLogPG::do_update_log_missing(OpRequestRef &op) m->entries, t, op_trim_to, op_roll_forward_to); eversion_t new_lcod = info.last_complete; - Context *complete = new FunctionContext( + Context *complete = new LambdaContext( [=](int) { const MOSDPGUpdateLogMissing *msg = static_cast( op->get_req()); diff --git a/src/osdc/ObjectCacher.cc b/src/osdc/ObjectCacher.cc index eef08b9b2d7f1..b5c336b3a9a94 100644 --- a/src/osdc/ObjectCacher.cc +++ b/src/osdc/ObjectCacher.cc @@ -2514,7 +2514,7 @@ void ObjectCacher::discard_writeback(ObjectSet *oset, if (gather.has_subs()) { bool flushed = was_dirty && oset->dirty_or_tx == 0; - gather.set_finisher(new FunctionContext( + gather.set_finisher(new LambdaContext( [this, oset, flushed, on_finish](int) { ceph_assert(ceph_mutex_is_locked(lock)); if (flushed && flush_set_callback) diff --git a/src/test/immutable_object_cache/test_DomainSocket.cc b/src/test/immutable_object_cache/test_DomainSocket.cc index ad9e79a0bfa3d..3a538a3191cb8 100644 --- a/src/test/immutable_object_cache/test_DomainSocket.cc +++ b/src/test/immutable_object_cache/test_DomainSocket.cc @@ -58,7 +58,7 @@ public: } } - auto ctx = new FunctionContext([](int reg) { + auto ctx = new LambdaContext([](int reg) { ASSERT_TRUE(reg == 0); }); m_cache_client->register_client(ctx); diff --git a/src/test/immutable_object_cache/test_multi_session.cc b/src/test/immutable_object_cache/test_multi_session.cc index 8703a69f76fc9..e3a73bc373ca1 100644 --- a/src/test/immutable_object_cache/test_multi_session.cc +++ b/src/test/immutable_object_cache/test_multi_session.cc @@ -106,7 +106,7 @@ public: void test_register_client(uint64_t random_index) { ASSERT_TRUE(m_cache_client_vec[random_index] == nullptr); - auto ctx = new FunctionContext([](int ret){ + auto ctx = new LambdaContext([](int ret){ ASSERT_TRUE(ret == 0); }); auto session = create_session(random_index); diff --git a/src/test/librados_test_stub/TestRadosClient.cc b/src/test/librados_test_stub/TestRadosClient.cc index 1b65c0fd30f9b..c039dfb3b572b 100644 --- a/src/test/librados_test_stub/TestRadosClient.cc +++ b/src/test/librados_test_stub/TestRadosClient.cc @@ -71,7 +71,7 @@ public: int ret = m_callback(); if (m_comp != NULL) { if (m_finisher != NULL) { - m_finisher->queue(new FunctionContext(boost::bind( + m_finisher->queue(new LambdaContext(boost::bind( &finish_aio_completion, m_comp, ret))); } else { finish_aio_completion(m_comp, ret); @@ -203,7 +203,7 @@ void TestRadosClient::add_aio_operation(const std::string& oid, struct WaitForFlush { int flushed() { if (--count == 0) { - aio_finisher->queue(new FunctionContext(boost::bind( + aio_finisher->queue(new LambdaContext(boost::bind( &finish_aio_completion, c, 0))); delete this; } @@ -240,7 +240,7 @@ void TestRadosClient::flush_aio_operations(AioCompletionImpl *c) { int TestRadosClient::aio_watch_flush(AioCompletionImpl *c) { c->get(); - Context *ctx = new FunctionContext(boost::bind( + Context *ctx = new LambdaContext(boost::bind( 
&TestRadosClient::finish_aio_completion, this, c, _1)); get_watch_notify()->aio_flush(this, ctx); return 0; diff --git a/src/test/librados_test_stub/TestWatchNotify.cc b/src/test/librados_test_stub/TestWatchNotify.cc index 2fa14400fc51c..9da9971fa802c 100644 --- a/src/test/librados_test_stub/TestWatchNotify.cc +++ b/src/test/librados_test_stub/TestWatchNotify.cc @@ -42,7 +42,7 @@ struct TestWatchNotify::ObjectHandler : public TestCluster::ObjectHandler { auto _pool_id = pool_id; auto _nspace = nspace; auto _oid = oid; - auto ctx = new FunctionContext([_test_watch_notify, _pool_id, _nspace, _oid](int r) { + auto ctx = new LambdaContext([_test_watch_notify, _pool_id, _nspace, _oid](int r) { _test_watch_notify->handle_object_removed(_pool_id, _nspace, _oid); }); test_rados_client->get_aio_finisher()->queue(ctx); @@ -107,7 +107,7 @@ void TestWatchNotify::aio_watch(TestRadosClient *rados_client, int64_t pool_id, librados::WatchCtx *watch_ctx, librados::WatchCtx2 *watch_ctx2, Context *on_finish) { - auto ctx = new FunctionContext([=](int) { + auto ctx = new LambdaContext([=](int) { execute_watch(rados_client, pool_id, nspace, o, gid, handle, watch_ctx, watch_ctx2, on_finish); }); @@ -123,7 +123,7 @@ int TestWatchNotify::unwatch(TestRadosClient *rados_client, void TestWatchNotify::aio_unwatch(TestRadosClient *rados_client, uint64_t handle, Context *on_finish) { - auto ctx = new FunctionContext([this, rados_client, handle, on_finish](int) { + auto ctx = new LambdaContext([this, rados_client, handle, on_finish](int) { execute_unwatch(rados_client, handle, on_finish); }); rados_client->get_aio_finisher()->queue(ctx); @@ -134,7 +134,7 @@ void TestWatchNotify::aio_notify(TestRadosClient *rados_client, int64_t pool_id, const std::string& oid, const bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl, Context *on_notify) { - auto ctx = new FunctionContext([=](int) { + auto ctx = new LambdaContext([=](int) { execute_notify(rados_client, pool_id, nspace, oid, bl, pbl, on_notify); }); rados_client->get_aio_finisher()->queue(ctx); @@ -293,7 +293,7 @@ void TestWatchNotify::execute_notify(TestRadosClient *rados_client, m_async_op_tracker.start_op(); uint64_t notifier_id = rados_client->get_instance_id(); - watch_handle.rados_client->get_aio_finisher()->queue(new FunctionContext( + watch_handle.rados_client->get_aio_finisher()->queue(new LambdaContext( [this, pool_id, nspace, oid, bl, notify_id, watch_handle, notifier_id](int r) { bufferlist notify_bl; notify_bl.append(bl); @@ -438,7 +438,7 @@ void TestWatchNotify::handle_object_removed(int64_t pool_id, auto handle = watch_handle.handle; auto watch_ctx2 = watch_handle.watch_ctx2; if (watch_ctx2 != nullptr) { - auto ctx = new FunctionContext([handle, watch_ctx2](int) { + auto ctx = new LambdaContext([handle, watch_ctx2](int) { watch_ctx2->handle_error(handle, -ENOTCONN); }); watch_handle.rados_client->get_aio_finisher()->queue(ctx); diff --git a/src/test/librbd/cache/test_mock_ParentImageCache.cc b/src/test/librbd/cache/test_mock_ParentImageCache.cc index 632ce1bd573f4..d48b5ec1993e5 100644 --- a/src/test/librbd/cache/test_mock_ParentImageCache.cc +++ b/src/test/librbd/cache/test_mock_ParentImageCache.cc @@ -151,12 +151,12 @@ TEST_F(TestMockParentImageCache, test_initialization_success) { expect_cache_run(*mock_parent_image_cache, 0); C_SaferCond cond; - Context* handle_connect = new FunctionContext([&cond](int ret) { + Context* handle_connect = new LambdaContext([&cond](int ret) { ASSERT_EQ(ret, 0); cond.complete(0); }); 
expect_cache_async_connect(*mock_parent_image_cache, 0, handle_connect); - Context* ctx = new FunctionContext([](bool reg) { + Context* ctx = new LambdaContext([](bool reg) { ASSERT_EQ(reg, true); }); expect_cache_register(*mock_parent_image_cache, ctx, 0); @@ -189,7 +189,7 @@ TEST_F(TestMockParentImageCache, test_initialization_fail_at_connect) { expect_cache_run(*mock_parent_image_cache, 0); C_SaferCond cond; - Context* handle_connect = new FunctionContext([&cond](int ret) { + Context* handle_connect = new LambdaContext([&cond](int ret) { ASSERT_EQ(ret, -1); cond.complete(0); }); @@ -223,12 +223,12 @@ TEST_F(TestMockParentImageCache, test_initialization_fail_at_register) { expect_cache_run(*mock_parent_image_cache, 0); C_SaferCond cond; - Context* handle_connect = new FunctionContext([&cond](int ret) { + Context* handle_connect = new LambdaContext([&cond](int ret) { ASSERT_EQ(ret, 0); cond.complete(0); }); expect_cache_async_connect(*mock_parent_image_cache, 0, handle_connect); - Context* ctx = new FunctionContext([](bool reg) { + Context* ctx = new LambdaContext([](bool reg) { ASSERT_EQ(reg, false); }); expect_cache_register(*mock_parent_image_cache, ctx, -1); @@ -300,12 +300,12 @@ TEST_F(TestMockParentImageCache, test_read) { expect_cache_run(*mock_parent_image_cache, 0); C_SaferCond conn_cond; - Context* handle_connect = new FunctionContext([&conn_cond](int ret) { + Context* handle_connect = new LambdaContext([&conn_cond](int ret) { ASSERT_EQ(ret, 0); conn_cond.complete(0); }); expect_cache_async_connect(*mock_parent_image_cache, 0, handle_connect); - Context* ctx = new FunctionContext([](bool reg) { + Context* ctx = new LambdaContext([](bool reg) { ASSERT_EQ(reg, true); }); expect_cache_register(*mock_parent_image_cache, ctx, 0); diff --git a/src/test/librbd/deep_copy/test_mock_ObjectCopyRequest.cc b/src/test/librbd/deep_copy/test_mock_ObjectCopyRequest.cc index d4cfa7f84911c..63c8adb2aac1c 100644 --- a/src/test/librbd/deep_copy/test_mock_ObjectCopyRequest.cc +++ b/src/test/librbd/deep_copy/test_mock_ObjectCopyRequest.cc @@ -167,8 +167,7 @@ public: if ((m_src_image_ctx->features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) { return; } - EXPECT_CALL(mock_exclusive_lock, start_op(_)).WillOnce( - ReturnNew([](int) {})); + EXPECT_CALL(mock_exclusive_lock, start_op(_)).WillOnce(Return(new LambdaContext([](int){}))); } void expect_list_snaps(librbd::MockTestImageCtx &mock_image_ctx, diff --git a/src/test/librbd/deep_copy/test_mock_SetHeadRequest.cc b/src/test/librbd/deep_copy/test_mock_SetHeadRequest.cc index 48c2f3b0f40b0..f0714f9d30330 100644 --- a/src/test/librbd/deep_copy/test_mock_SetHeadRequest.cc +++ b/src/test/librbd/deep_copy/test_mock_SetHeadRequest.cc @@ -110,8 +110,7 @@ public: } void expect_start_op(librbd::MockExclusiveLock &mock_exclusive_lock) { - EXPECT_CALL(mock_exclusive_lock, start_op(_)).WillOnce( - ReturnNew([](int) {})); + EXPECT_CALL(mock_exclusive_lock, start_op(_)).WillOnce(Return(new LambdaContext([](int){}))); } void expect_test_features(librbd::MockTestImageCtx &mock_image_ctx, diff --git a/src/test/librbd/deep_copy/test_mock_SnapshotCopyRequest.cc b/src/test/librbd/deep_copy/test_mock_SnapshotCopyRequest.cc index 2e15b4d4d85e7..56c5c78a6a533 100644 --- a/src/test/librbd/deep_copy/test_mock_SnapshotCopyRequest.cc +++ b/src/test/librbd/deep_copy/test_mock_SnapshotCopyRequest.cc @@ -148,8 +148,7 @@ public: if ((m_src_image_ctx->features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) { return; } - EXPECT_CALL(mock_exclusive_lock, start_op(_)).WillOnce( - ReturnNew([](int) {})); + 
EXPECT_CALL(mock_exclusive_lock, start_op(_)).WillOnce(Return(new LambdaContext([](int){}))); } void expect_get_snap_namespace(librbd::MockTestImageCtx &mock_image_ctx, diff --git a/src/test/librbd/deep_copy/test_mock_SnapshotCreateRequest.cc b/src/test/librbd/deep_copy/test_mock_SnapshotCreateRequest.cc index 57945e70cf2da..27435b72a254c 100644 --- a/src/test/librbd/deep_copy/test_mock_SnapshotCreateRequest.cc +++ b/src/test/librbd/deep_copy/test_mock_SnapshotCreateRequest.cc @@ -89,8 +89,7 @@ public: } void expect_start_op(librbd::MockExclusiveLock &mock_exclusive_lock) { - EXPECT_CALL(mock_exclusive_lock, start_op(_)).WillOnce( - ReturnNew([](int) {})); + EXPECT_CALL(mock_exclusive_lock, start_op(_)).WillOnce(Return(new LambdaContext([](int){}))); } void expect_test_features(librbd::MockTestImageCtx &mock_image_ctx, diff --git a/src/test/librbd/journal/test_mock_Replay.cc b/src/test/librbd/journal/test_mock_Replay.cc index 7f935bbcd762e..7b65b9618076a 100644 --- a/src/test/librbd/journal/test_mock_Replay.cc +++ b/src/test/librbd/journal/test_mock_Replay.cc @@ -122,7 +122,7 @@ ACTION_P2(NotifyInvoke, lock, cond) { } ACTION_P2(CompleteAioCompletion, r, image_ctx) { - image_ctx->op_work_queue->queue(new FunctionContext([this, arg0](int r) { + image_ctx->op_work_queue->queue(new LambdaContext([this, arg0](int r) { arg0->get(); arg0->init_time(image_ctx, librbd::io::AIO_TYPE_NONE); arg0->set_request_count(1); diff --git a/src/test/librbd/operation/test_mock_Request.cc b/src/test/librbd/operation/test_mock_Request.cc index 81215e1ca9070..5c5e7a37535d2 100644 --- a/src/test/librbd/operation/test_mock_Request.cc +++ b/src/test/librbd/operation/test_mock_Request.cc @@ -109,7 +109,7 @@ struct TestMockOperationRequest : public TestMockFixture { EXPECT_CALL(mock_request, send_op()) .WillOnce(Invoke([&mock_image_ctx, &mock_request, r]() { mock_image_ctx.image_ctx->op_work_queue->queue( - new FunctionContext([&mock_request, r](int _) { + new LambdaContext([&mock_request, r](int _) { mock_request.send_op_impl(r); }), 0); })); diff --git a/src/test/librbd/operation/test_mock_ResizeRequest.cc b/src/test/librbd/operation/test_mock_ResizeRequest.cc index 95d31d1bc311d..4cbf914313dd1 100644 --- a/src/test/librbd/operation/test_mock_ResizeRequest.cc +++ b/src/test/librbd/operation/test_mock_ResizeRequest.cc @@ -157,7 +157,7 @@ public: EXPECT_CALL(mock_io_image_dispatch_spec, send()) .WillOnce(Invoke([&mock_image_ctx, &mock_io_image_dispatch_spec, r]() { auto aio_comp = mock_io_image_dispatch_spec.s_instance->aio_comp; - auto ctx = new FunctionContext([aio_comp](int r) { + auto ctx = new LambdaContext([aio_comp](int r) { aio_comp->fail(r); }); mock_image_ctx.image_ctx->op_work_queue->queue(ctx, r); diff --git a/src/test/librbd/test_ObjectMap.cc b/src/test/librbd/test_ObjectMap.cc index c5d636559cba1..8e80beeacb51a 100644 --- a/src/test/librbd/test_ObjectMap.cc +++ b/src/test/librbd/test_ObjectMap.cc @@ -193,7 +193,7 @@ TEST_F(TestObjectMap, DISABLED_StressTest) { throttle.start_op(); uint64_t object_no = (rand() % object_count); - auto ctx = new FunctionContext([&throttle, object_no](int r) { + auto ctx = new LambdaContext([&throttle, object_no](int r) { ASSERT_EQ(0, r) << "object_no=" << object_no; throttle.end_op(r); }); diff --git a/src/test/librbd/test_mock_DeepCopyRequest.cc b/src/test/librbd/test_mock_DeepCopyRequest.cc index 00c457bf0a8cb..d888fbf3cf291 100644 --- a/src/test/librbd/test_mock_DeepCopyRequest.cc +++ b/src/test/librbd/test_mock_DeepCopyRequest.cc @@ -170,8 +170,7 @@ public: } void 
expect_start_op(librbd::MockExclusiveLock &mock_exclusive_lock) { - EXPECT_CALL(mock_exclusive_lock, start_op(_)).WillOnce( - ReturnNew([](int) {})); + EXPECT_CALL(mock_exclusive_lock, start_op(_)).WillOnce(Return(new LambdaContext([](int){}))); } void expect_rollback_object_map(librbd::MockObjectMap &mock_object_map, int r) { diff --git a/src/test/librbd/test_mock_Journal.cc b/src/test/librbd/test_mock_Journal.cc index d124b236a5dfe..dcc74fcc889bc 100644 --- a/src/test/librbd/test_mock_Journal.cc +++ b/src/test/librbd/test_mock_Journal.cc @@ -492,7 +492,7 @@ public: mock_image_ctx.image_ctx->op_work_queue->queue(ctx, r); } - on_flush = new FunctionContext([on_flush](int r) { + on_flush = new LambdaContext([on_flush](int r) { derr << "FLUSH START" << dendl; on_flush->complete(r); derr << "FLUSH FINISH" << dendl; diff --git a/src/test/librbd/test_mock_Watcher.cc b/src/test/librbd/test_mock_Watcher.cc index ce052022799e8..704d7895b0074 100644 --- a/src/test/librbd/test_mock_Watcher.cc +++ b/src/test/librbd/test_mock_Watcher.cc @@ -76,7 +76,7 @@ public: } c->get(); - mock_image_ctx.image_ctx->op_work_queue->queue(new FunctionContext([mock_rados_client, action, c](int r) { + mock_image_ctx.image_ctx->op_work_queue->queue(new LambdaContext([mock_rados_client, action, c](int r) { if (action) { action(); } @@ -97,7 +97,7 @@ public: .WillOnce(DoAll(Invoke([this, &mock_image_ctx, mock_rados_client, r, action]( uint64_t handle, librados::AioCompletionImpl *c) { c->get(); - mock_image_ctx.image_ctx->op_work_queue->queue(new FunctionContext([mock_rados_client, action, c](int r) { + mock_image_ctx.image_ctx->op_work_queue->queue(new LambdaContext([mock_rados_client, action, c](int r) { if (action) { action(); } diff --git a/src/test/librbd/watcher/test_mock_RewatchRequest.cc b/src/test/librbd/watcher/test_mock_RewatchRequest.cc index 1ff059a0ed47d..5d3380954d9d4 100644 --- a/src/test/librbd/watcher/test_mock_RewatchRequest.cc +++ b/src/test/librbd/watcher/test_mock_RewatchRequest.cc @@ -34,7 +34,7 @@ struct TestMockWatcherRewatchRequest : public TestMockFixture { .WillOnce(DoAll(WithArgs<1, 2>(Invoke([&mock_image_ctx, &mock_io_ctx, r](librados::AioCompletionImpl *c, uint64_t *cookie) { *cookie = 234; c->get(); - mock_image_ctx.image_ctx->op_work_queue->queue(new FunctionContext([&mock_io_ctx, c](int r) { + mock_image_ctx.image_ctx->op_work_queue->queue(new LambdaContext([&mock_io_ctx, c](int r) { mock_io_ctx.get_mock_rados_client()->finish_aio_completion(c, r); }), r); })), @@ -49,7 +49,7 @@ struct TestMockWatcherRewatchRequest : public TestMockFixture { .WillOnce(DoAll(Invoke([&mock_image_ctx, &mock_io_ctx, r](uint64_t handle, librados::AioCompletionImpl *c) { c->get(); - mock_image_ctx.image_ctx->op_work_queue->queue(new FunctionContext([&mock_io_ctx, c](int r) { + mock_image_ctx.image_ctx->op_work_queue->queue(new LambdaContext([&mock_io_ctx, c](int r) { mock_io_ctx.get_mock_rados_client()->finish_aio_completion(c, r); }), r); }), diff --git a/src/test/rbd_mirror/image_deleter/test_mock_SnapshotPurgeRequest.cc b/src/test/rbd_mirror/image_deleter/test_mock_SnapshotPurgeRequest.cc index dde78029611fa..0084181200a90 100644 --- a/src/test/rbd_mirror/image_deleter/test_mock_SnapshotPurgeRequest.cc +++ b/src/test/rbd_mirror/image_deleter/test_mock_SnapshotPurgeRequest.cc @@ -141,11 +141,12 @@ public: void expect_start_op(librbd::MockTestImageCtx &mock_image_ctx, bool success) { EXPECT_CALL(*mock_image_ctx.exclusive_lock, start_op(_)) .WillOnce(Invoke([success](int* r) { + auto f = [](int r) {}; if 
(!success) { *r = -EROFS; - return static_cast(nullptr); + return static_cast*>(nullptr); } - return new FunctionContext([](int r) {}); + return new LambdaContext(std::move(f)); })); } diff --git a/src/test/rbd_mirror/image_deleter/test_mock_TrashWatcher.cc b/src/test/rbd_mirror/image_deleter/test_mock_TrashWatcher.cc index d1a707ed803ba..a3961d9f6a6fa 100644 --- a/src/test/rbd_mirror/image_deleter/test_mock_TrashWatcher.cc +++ b/src/test/rbd_mirror/image_deleter/test_mock_TrashWatcher.cc @@ -180,7 +180,7 @@ public: EXPECT_CALL(*mock_threads.timer, add_event_after(_, _)) .WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) { auto wrapped_ctx = - new FunctionContext([this, ctx](int r) { + new LambdaContext([this, ctx](int r) { std::lock_guard timer_locker{m_threads->timer_lock}; ctx->complete(r); }); diff --git a/src/test/rbd_mirror/test_mock_ImageMap.cc b/src/test/rbd_mirror/test_mock_ImageMap.cc index 4dfd669259a1a..3f810704a33ea 100644 --- a/src/test/rbd_mirror/test_mock_ImageMap.cc +++ b/src/test/rbd_mirror/test_mock_ImageMap.cc @@ -188,7 +188,7 @@ public: void expect_add_event(MockThreads &mock_threads) { EXPECT_CALL(*mock_threads.timer, add_event_after(_,_)) .WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) { - auto wrapped_ctx = new FunctionContext([this, ctx](int r) { + auto wrapped_ctx = new LambdaContext([this, ctx](int r) { std::lock_guard timer_locker{m_threads->timer_lock}; ctx->complete(r); }); @@ -203,7 +203,7 @@ public: CephContext *cct = reinterpret_cast(m_local_io_ctx.cct()); cct->_conf.set_val("rbd_mirror_image_policy_rebalance_timeout", "0"); - auto wrapped_ctx = new FunctionContext([this, ctx](int r) { + auto wrapped_ctx = new LambdaContext([this, ctx](int r) { std::lock_guard timer_locker{m_threads->timer_lock}; ctx->complete(r); }); diff --git a/src/test/rbd_mirror/test_mock_InstanceReplayer.cc b/src/test/rbd_mirror/test_mock_InstanceReplayer.cc index c3eca03bb7337..9779758c9d364 100644 --- a/src/test/rbd_mirror/test_mock_InstanceReplayer.cc +++ b/src/test/rbd_mirror/test_mock_InstanceReplayer.cc @@ -150,7 +150,7 @@ public: mock_threads.timer_cond.notify_one(); } else { m_threads->work_queue->queue( - new FunctionContext([&mock_threads, ctx](int) { + new LambdaContext([&mock_threads, ctx](int) { std::lock_guard timer_lock{mock_threads.timer_lock}; ctx->complete(0); }), 0); diff --git a/src/test/rbd_mirror/test_mock_InstanceWatcher.cc b/src/test/rbd_mirror/test_mock_InstanceWatcher.cc index 78bb972b55565..8e9b79e8d566b 100644 --- a/src/test/rbd_mirror/test_mock_InstanceWatcher.cc +++ b/src/test/rbd_mirror/test_mock_InstanceWatcher.cc @@ -500,7 +500,7 @@ TEST_F(TestMockInstanceWatcher, ImageAcquireReleaseCancel) { const std::string& o, librados::AioCompletionImpl *c, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl) { c->get(); - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [instance_watcher, &mock_io_ctx, c, pbl](int r) { instance_watcher->cancel_notify_requests("other"); encode(librbd::watcher::NotifyResponse(), *pbl); @@ -521,7 +521,7 @@ TEST_F(TestMockInstanceWatcher, ImageAcquireReleaseCancel) { const std::string& o, librados::AioCompletionImpl *c, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl) { c->get(); - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [instance_watcher, &mock_io_ctx, c, pbl](int r) { instance_watcher->cancel_notify_requests("other"); encode(librbd::watcher::NotifyResponse(), *pbl); @@ -631,7 +631,7 @@ TEST_F(TestMockInstanceWatcher, PeerImageRemovedCancel) { const std::string& o, 
librados::AioCompletionImpl *c, bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl) { c->get(); - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [instance_watcher, &mock_io_ctx, c, pbl](int r) { instance_watcher->cancel_notify_requests("other"); encode(librbd::watcher::NotifyResponse(), *pbl); diff --git a/src/test/rbd_mirror/test_mock_LeaderWatcher.cc b/src/test/rbd_mirror/test_mock_LeaderWatcher.cc index 06ccf40a8881d..ec64e668c159d 100644 --- a/src/test/rbd_mirror/test_mock_LeaderWatcher.cc +++ b/src/test/rbd_mirror/test_mock_LeaderWatcher.cc @@ -82,7 +82,7 @@ struct ManagedLock { void shut_down(Context *on_shutdown) { if (MockManagedLock::get_instance().m_release_lock_on_shutdown) { - on_shutdown = new FunctionContext( + on_shutdown = new LambdaContext( [this, on_shutdown](int r) { MockManagedLock::get_instance().m_release_lock_on_shutdown = false; shut_down(on_shutdown); @@ -96,7 +96,7 @@ struct ManagedLock { void try_acquire_lock(Context *on_acquired) { Context *post_acquire_ctx = create_async_context_callback( - m_work_queue, new FunctionContext( + m_work_queue, new LambdaContext( [this, on_acquired](int r) { post_acquire_lock_handler(r, on_acquired); })); @@ -107,7 +107,7 @@ struct ManagedLock { ceph_assert(MockManagedLock::get_instance().m_on_released == nullptr); MockManagedLock::get_instance().m_on_released = on_released; - Context *post_release_ctx = new FunctionContext( + Context *post_release_ctx = new LambdaContext( [this](int r) { ceph_assert(MockManagedLock::get_instance().m_on_released != nullptr); post_release_lock_handler(false, r, @@ -115,7 +115,7 @@ struct ManagedLock { MockManagedLock::get_instance().m_on_released = nullptr; }); - Context *release_ctx = new FunctionContext( + Context *release_ctx = new LambdaContext( [post_release_ctx](int r) { if (r < 0) { MockManagedLock::get_instance().m_on_released->complete(r); @@ -124,7 +124,7 @@ struct ManagedLock { } }); - Context *pre_release_ctx = new FunctionContext( + Context *pre_release_ctx = new LambdaContext( [this, release_ctx](int r) { bool shutting_down = MockManagedLock::get_instance().m_release_lock_on_shutdown; @@ -316,7 +316,7 @@ public: if (on_finish != nullptr) { auto on_released = mock_managed_lock.m_on_released; ceph_assert(on_released != nullptr); - mock_managed_lock.m_on_released = new FunctionContext( + mock_managed_lock.m_on_released = new LambdaContext( [on_released, on_finish](int r) { on_released->complete(r); on_finish->complete(r); diff --git a/src/test/rbd_mirror/test_mock_PoolReplayer.cc b/src/test/rbd_mirror/test_mock_PoolReplayer.cc index 339ed936b0b61..b7a8ff557ab71 100644 --- a/src/test/rbd_mirror/test_mock_PoolReplayer.cc +++ b/src/test/rbd_mirror/test_mock_PoolReplayer.cc @@ -724,7 +724,7 @@ TEST_F(TestMockPoolReplayer, NamespacesError) { // test namespace replayer init fails for non leader C_SaferCond on_ns1_init; - auto ctx = new FunctionContext( + Context* ctx = new LambdaContext( [&mock_namespace, &on_ns1_init](int r) { mock_namespace.remove("ns1"); on_ns1_init.complete(r); @@ -760,7 +760,7 @@ TEST_F(TestMockPoolReplayer, NamespacesError) { expect_namespace_replayer_handle_acquire_leader(*mock_ns2_namespace_replayer, -EINVAL); - ctx = new FunctionContext( + ctx = new LambdaContext( [&mock_namespace](int) { mock_namespace.remove("ns2"); }); @@ -774,7 +774,7 @@ TEST_F(TestMockPoolReplayer, NamespacesError) { // test namespace replayer init fails on acquire leader C_SaferCond on_ns3_shut_down; - ctx = new FunctionContext( + ctx = new LambdaContext( [&mock_namespace, 
&on_ns3_shut_down](int) { mock_namespace.remove("ns3"); on_ns3_shut_down.complete(0); diff --git a/src/test/rbd_mirror/test_mock_PoolWatcher.cc b/src/test/rbd_mirror/test_mock_PoolWatcher.cc index ae763a2f54a47..faacf2c5b1a43 100644 --- a/src/test/rbd_mirror/test_mock_PoolWatcher.cc +++ b/src/test/rbd_mirror/test_mock_PoolWatcher.cc @@ -240,7 +240,7 @@ public: EXPECT_CALL(*mock_threads.timer, add_event_after(_, _)) .WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) { auto wrapped_ctx = - new FunctionContext([this, ctx](int r) { + new LambdaContext([this, ctx](int r) { std::lock_guard timer_locker{m_threads->timer_lock}; ctx->complete(r); }); diff --git a/src/tools/ceph_objectstore_tool.cc b/src/tools/ceph_objectstore_tool.cc index c91403d68137c..1830f7e16816b 100644 --- a/src/tools/ceph_objectstore_tool.cc +++ b/src/tools/ceph_objectstore_tool.cc @@ -539,7 +539,7 @@ void wait_until_done(ObjectStore::Transaction* txn, Func&& func) bool finished = false; std::condition_variable cond; std::mutex m; - txn->register_on_complete(make_lambda_context([&]() { + txn->register_on_complete(make_lambda_context([&](int) { std::unique_lock lock{m}; finished = true; cond.notify_one(); diff --git a/src/tools/immutable_object_cache/CacheClient.cc b/src/tools/immutable_object_cache/CacheClient.cc index 9d883c0f7ea0a..60ba9f52fc125 100644 --- a/src/tools/immutable_object_cache/CacheClient.cc +++ b/src/tools/immutable_object_cache/CacheClient.cc @@ -77,7 +77,7 @@ namespace immutable_obj_cache { int CacheClient::connect() { int ret = -1; C_SaferCond cond; - Context* on_finish = new FunctionContext([&cond, &ret](int err) { + Context* on_finish = new LambdaContext([&cond, &ret](int err) { ret = err; cond.complete(err); }); @@ -280,7 +280,7 @@ namespace immutable_obj_cache { } ceph_assert(current_request != nullptr); - auto process_reply = new FunctionContext([current_request, reply] + auto process_reply = new LambdaContext([current_request, reply] (bool dedicated) { if (dedicated) { // dedicated thrad to execute this context. 
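Every hunk in this conversion lands on the same call-site shape: allocate a context from a lambda that receives the completion code as an int, then hand the pointer to a finisher, timer, or work queue that completes it later. Below is a minimal self-contained sketch of that shape; the Context and LambdaContext definitions here are simplified stand-ins written only for illustration, not the actual Ceph declarations.

#include <utility>

// Simplified stand-ins for illustration only; the real Ceph types differ.
struct Context {
  virtual ~Context() = default;
  virtual void finish(int r) = 0;
  // One-shot completion: run the callback, then destroy the context.
  void complete(int r) { finish(r); delete this; }
};

template <typename F>
struct LambdaContext : public Context {
  explicit LambdaContext(F &&f) : f_(std::move(f)) {}
  void finish(int r) override { f_(r); }
private:
  F f_;
};

int main() {
  int ret = -1;
  // The shape used throughout the converted call sites: capture what the
  // callback needs and take the completion code as an int.
  Context *on_finish = new LambdaContext([&ret](int err) { ret = err; });
  // Normally a finisher, SafeTimer, or I/O completion would call this.
  on_finish->complete(0);
  return ret == 0 ? 0 : 1;
}

The brace-initialized C_MonContext{...} call sites in the monitor code follow the same shape, with the wrapper adding an is_shutdown() check before forwarding to the base finish().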
diff --git a/src/tools/immutable_object_cache/ObjectCacheStore.cc b/src/tools/immutable_object_cache/ObjectCacheStore.cc index 33f8dc1adeddf..b3b793d7c2dae 100644 --- a/src/tools/immutable_object_cache/ObjectCacheStore.cc +++ b/src/tools/immutable_object_cache/ObjectCacheStore.cc @@ -120,7 +120,7 @@ int ObjectCacheStore::do_promote(std::string pool_nspace, librados::bufferlist* read_buf = new librados::bufferlist(); - auto ctx = new FunctionContext([this, read_buf, cache_file_name](int ret) { + auto ctx = new LambdaContext([this, read_buf, cache_file_name](int ret) { handle_promote_callback(ret, read_buf, cache_file_name); }); diff --git a/src/tools/rbd/action/MirrorPool.cc b/src/tools/rbd/action/MirrorPool.cc index 53e6ffdaa73c0..ed6605da1896d 100644 --- a/src/tools/rbd/action/MirrorPool.cc +++ b/src/tools/rbd/action/MirrorPool.cc @@ -272,7 +272,7 @@ public: dout(20) << this << " " << __func__ << ": image_name=" << m_image_name << dendl; - auto ctx = new FunctionContext([this](int r) { + auto ctx = new LambdaContext([this](int r) { handle_finalize(r); }); diff --git a/src/tools/rbd_mirror/ImageDeleter.cc b/src/tools/rbd_mirror/ImageDeleter.cc index 193f24658484a..f2122ba7bbc19 100644 --- a/src/tools/rbd_mirror/ImageDeleter.cc +++ b/src/tools/rbd_mirror/ImageDeleter.cc @@ -189,7 +189,7 @@ template void ImageDeleter::shut_down_trash_watcher(Context* on_finish) { dout(10) << dendl; ceph_assert(m_trash_watcher); - auto ctx = new FunctionContext([this, on_finish](int r) { + auto ctx = new LambdaContext([this, on_finish](int r) { delete m_trash_watcher; m_trash_watcher = nullptr; @@ -206,7 +206,7 @@ void ImageDeleter::wait_for_ops(Context* on_finish) { cancel_retry_timer(); } - auto ctx = new FunctionContext([this, on_finish](int) { + auto ctx = new LambdaContext([this, on_finish](int) { cancel_all_deletions(on_finish); }); m_async_op_tracker.wait_for_ops(ctx); @@ -236,7 +236,7 @@ void ImageDeleter::wait_for_deletion(const std::string& image_id, Context* on_finish) { dout(5) << "image_id=" << image_id << dendl; - on_finish = new FunctionContext([this, on_finish](int r) { + on_finish = new LambdaContext([this, on_finish](int r) { m_threads->work_queue->queue(on_finish, r); }); @@ -374,7 +374,7 @@ void ImageDeleter::remove_images() { ceph_assert(delete_info); auto on_start = create_async_context_callback( - m_threads->work_queue, new FunctionContext( + m_threads->work_queue, new LambdaContext( [this, delete_info](int r) { if (r < 0) { notify_on_delete(delete_info->image_id, r); @@ -397,7 +397,7 @@ void ImageDeleter::remove_image(DeleteInfoRef delete_info) { m_in_flight_delete_queue.push_back(delete_info); m_async_op_tracker.start_op(); - auto ctx = new FunctionContext([this, delete_info](int r) { + auto ctx = new LambdaContext([this, delete_info](int r) { handle_remove_image(delete_info, r); m_async_op_tracker.finish_op(); }); @@ -454,7 +454,7 @@ void ImageDeleter::schedule_retry_timer() { dout(10) << dendl; auto &delete_info = m_retry_delete_queue.front(); - m_timer_ctx = new FunctionContext([this](int r) { + m_timer_ctx = new LambdaContext([this](int r) { handle_retry_timer(); }); m_threads->timer->add_event_at(delete_info->retry_time, m_timer_ctx); @@ -500,7 +500,7 @@ void ImageDeleter::handle_retry_timer() { // start (concurrent) removal of images m_async_op_tracker.start_op(); - auto ctx = new FunctionContext([this](int r) { + auto ctx = new LambdaContext([this](int r) { remove_images(); m_async_op_tracker.finish_op(); }); diff --git a/src/tools/rbd_mirror/ImageMap.cc 
b/src/tools/rbd_mirror/ImageMap.cc index e1089f7914053..6a308019f6934 100644 --- a/src/tools/rbd_mirror/ImageMap.cc +++ b/src/tools/rbd_mirror/ImageMap.cc @@ -118,7 +118,7 @@ void ImageMap::update_image_mapping(Updates&& map_updates, dout(5) << "updates=[" << map_updates << "], " << "removes=[" << map_removals << "]" << dendl; - Context *on_finish = new FunctionContext( + Context *on_finish = new LambdaContext( [this, map_updates, map_removals](int r) { handle_update_request(map_updates, map_removals, r); finish_async_op(); @@ -221,7 +221,7 @@ void ImageMap::schedule_update_task(const ceph::mutex &timer_lock) { } } - m_timer_task = new FunctionContext([this](int r) { + m_timer_task = new LambdaContext([this](int r) { ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock)); m_timer_task = nullptr; @@ -275,7 +275,7 @@ void ImageMap::schedule_rebalance_task() { m_threads->timer->cancel_event(m_rebalance_task); } - m_rebalance_task = new FunctionContext([this](int _) { + m_rebalance_task = new LambdaContext([this](int _) { ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock)); m_rebalance_task = nullptr; diff --git a/src/tools/rbd_mirror/ImageReplayer.cc b/src/tools/rbd_mirror/ImageReplayer.cc index 16cc6208a4cbc..1111b3b8438b0 100644 --- a/src/tools/rbd_mirror/ImageReplayer.cc +++ b/src/tools/rbd_mirror/ImageReplayer.cc @@ -250,7 +250,7 @@ void ImageReplayer::BootstrapProgressContext::update_progress( template void ImageReplayer::RemoteJournalerListener::handle_update( ::journal::JournalMetadata *) { - FunctionContext *ctx = new FunctionContext([this](int r) { + auto ctx = new LambdaContext([this](int r) { replayer->handle_remote_journal_metadata_updated(); }); replayer->m_threads->work_queue->queue(ctx, 0); @@ -668,7 +668,7 @@ template void ImageReplayer::on_start_fail(int r, const std::string &desc) { dout(10) << "r=" << r << dendl; - Context *ctx = new FunctionContext([this, r, desc](int _r) { + Context *ctx = new LambdaContext([this, r, desc](int _r) { { std::lock_guard locker{m_lock}; ceph_assert(m_state == STATE_STARTING); @@ -820,7 +820,7 @@ void ImageReplayer::handle_replay_ready() template void ImageReplayer::restart(Context *on_finish) { - FunctionContext *ctx = new FunctionContext( + auto ctx = new LambdaContext( [this, on_finish](int r) { if (r < 0) { // Try start anyway. 
@@ -852,7 +852,7 @@ void ImageReplayer::flush_local_replay(Context* on_flush) } dout(15) << dendl; - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [this, on_flush](int r) { handle_flush_local_replay(on_flush, r); }); @@ -884,7 +884,7 @@ void ImageReplayer::flush_commit_position(Context* on_flush) } dout(15) << dendl; - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [this, on_flush](int r) { handle_flush_commit_position(on_flush, r); }); @@ -976,7 +976,7 @@ void ImageReplayer::replay_flush() { // replayer to handle the new tag epoch Context *ctx = create_context_callback< ImageReplayer, &ImageReplayer::handle_replay_flush>(this); - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { m_local_image_ctx->journal->stop_external_replay(); m_local_replay = nullptr; @@ -1134,7 +1134,7 @@ void ImageReplayer::preprocess_entry() { std::lock_guard timer_locker{m_threads->timer_lock}; ceph_assert(m_delayed_preprocess_task == nullptr); - m_delayed_preprocess_task = new FunctionContext( + m_delayed_preprocess_task = new LambdaContext( [this](int r) { ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock)); m_delayed_preprocess_task = nullptr; @@ -1245,7 +1245,7 @@ void ImageReplayer::handle_process_entry_safe(const ReplayEntry &replay_entry g_perf_counters->tinc(l_rbd_mirror_replay_latency, latency); } - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [this, bytes, latency](int r) { std::lock_guard locker{m_lock}; if (m_perf_counters) { @@ -1321,18 +1321,18 @@ template void ImageReplayer::queue_mirror_image_status_update(const OptionalState &state) { dout(15) << dendl; - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [this, state](int r) { send_mirror_status_update(state); }); // ensure pending IO is flushed and the commit position is updated // prior to updating the mirror status - ctx = new FunctionContext( - [this, ctx](int r) { + auto ctx2 = new LambdaContext( + [this, ctx=std::move(ctx)](int r) { flush_local_replay(ctx); }); - m_threads->work_queue->queue(ctx, 0); + m_threads->work_queue->queue(ctx2, 0); } template @@ -1387,7 +1387,7 @@ void ImageReplayer::send_mirror_status_update(const OptionalState &opt_state) case STATE_REPLAY_FLUSHING: status.state = cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING; { - Context *on_req_finish = new FunctionContext( + Context *on_req_finish = new LambdaContext( [this](int r) { dout(15) << "replay status ready: r=" << r << dendl; if (r >= 0) { @@ -1507,7 +1507,7 @@ void ImageReplayer::reschedule_update_status_task(int new_interval) { if (new_interval >= 0 && is_running_() && start_mirror_image_status_update(true, false)) { - m_update_status_task = new FunctionContext( + m_update_status_task = new LambdaContext( [this](int r) { ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock)); m_update_status_task = nullptr; @@ -1557,7 +1557,7 @@ void ImageReplayer::shut_down(int r) { if (m_in_flight_status_updates > 0) { if (m_on_update_status_finish == nullptr) { dout(15) << "waiting for in-flight status update" << dendl; - m_on_update_status_finish = new FunctionContext( + m_on_update_status_finish = new LambdaContext( [this, r](int _r) { shut_down(r); }); @@ -1571,7 +1571,7 @@ void ImageReplayer::shut_down(int r) { // case the remote cluster is unreachable // chain the shut down sequence (reverse order) - Context *ctx = new FunctionContext( + Context *ctx = new LambdaContext( [this, r](int _r) { update_mirror_image_status(true, STATE_STOPPED); 
handle_shut_down(r); @@ -1579,12 +1579,12 @@ void ImageReplayer::shut_down(int r) { // close the remote journal if (m_remote_journaler != nullptr) { - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { delete m_remote_journaler; m_remote_journaler = nullptr; ctx->complete(0); }); - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { m_remote_journaler->remove_listener(&m_remote_listener); m_remote_journaler->shut_down(ctx); }); @@ -1592,20 +1592,20 @@ void ImageReplayer::shut_down(int r) { // stop the replay of remote journal events if (m_replay_handler != nullptr) { - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { delete m_replay_handler; m_replay_handler = nullptr; m_event_replay_tracker.wait_for_ops(ctx); }); - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { m_remote_journaler->stop_replay(ctx); }); } // close the local image (release exclusive lock) if (m_local_image_ctx) { - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { CloseImageRequest *request = CloseImageRequest::create( &m_local_image_ctx, ctx); request->send(); @@ -1614,12 +1614,12 @@ void ImageReplayer::shut_down(int r) { // shut down event replay into the local image if (m_local_journal != nullptr) { - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { m_local_journal = nullptr; ctx->complete(0); }); if (m_local_replay != nullptr) { - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { m_local_journal->stop_external_replay(); m_local_replay = nullptr; @@ -1628,7 +1628,7 @@ void ImageReplayer::shut_down(int r) { ctx->complete(0); }); } - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { // blocks if listener notification is in-progress m_local_journal->remove_listener(m_journal_listener); ctx->complete(0); @@ -1636,7 +1636,7 @@ void ImageReplayer::shut_down(int r) { } // wait for all local in-flight replay events to complete - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { if (r < 0) { derr << "error shutting down journal replay: " << cpp_strerror(r) << dendl; @@ -1647,7 +1647,7 @@ void ImageReplayer::shut_down(int r) { // flush any local in-flight replay events if (m_local_replay != nullptr) { - ctx = new FunctionContext([this, ctx](int r) { + ctx = new LambdaContext([this, ctx](int r) { m_local_replay->shut_down(true, ctx); }); } @@ -1670,7 +1670,7 @@ void ImageReplayer::handle_shut_down(int r) { if (m_in_flight_status_updates > 0) { if (m_on_update_status_finish == nullptr) { dout(15) << "waiting for in-flight status update" << dendl; - m_on_update_status_finish = new FunctionContext( + m_on_update_status_finish = new LambdaContext( [this, r](int _r) { handle_shut_down(r); }); @@ -1702,7 +1702,7 @@ void ImageReplayer::handle_shut_down(int r) { if (delete_requested || resync_requested) { dout(5) << "moving image to trash" << dendl; - auto ctx = new FunctionContext([this, r](int) { + auto ctx = new LambdaContext([this, r](int) { handle_shut_down(r); }); ImageDeleter::trash_move(m_local_io_ctx, m_global_image_id, diff --git a/src/tools/rbd_mirror/ImageSync.cc b/src/tools/rbd_mirror/ImageSync.cc index 918bc22190f93..ae1c475f62379 100644 --- a/src/tools/rbd_mirror/ImageSync.cc +++ b/src/tools/rbd_mirror/ImageSync.cc @@ -364,7 +364,7 @@ 
void ImageSync::handle_update_sync_point(int r) { m_updating_sync_point = false; if (m_image_copy_request != nullptr) { - m_update_sync_ctx = new FunctionContext( + m_update_sync_ctx = new LambdaContext( [this](int r) { std::lock_guard locker{m_lock}; this->send_update_sync_point(); diff --git a/src/tools/rbd_mirror/InstanceReplayer.cc b/src/tools/rbd_mirror/InstanceReplayer.cc index 88f048006474c..689eee740c693 100644 --- a/src/tools/rbd_mirror/InstanceReplayer.cc +++ b/src/tools/rbd_mirror/InstanceReplayer.cc @@ -62,7 +62,7 @@ template void InstanceReplayer::init(Context *on_finish) { dout(10) << dendl; - Context *ctx = new FunctionContext( + Context *ctx = new LambdaContext( [this, on_finish] (int r) { { std::lock_guard timer_locker{m_threads->timer_lock}; @@ -91,7 +91,7 @@ void InstanceReplayer::shut_down(Context *on_finish) { ceph_assert(m_on_shut_down == nullptr); m_on_shut_down = on_finish; - Context *ctx = new FunctionContext( + Context *ctx = new LambdaContext( [this] (int r) { cancel_image_state_check_task(); wait_for_ops(); @@ -121,7 +121,7 @@ void InstanceReplayer::release_all(Context *on_finish) { it = m_image_replayers.erase(it)) { auto image_replayer = it->second; auto ctx = gather_ctx->new_sub(); - ctx = new FunctionContext( + ctx = new LambdaContext( [image_replayer, ctx] (int r) { image_replayer->destroy(); ctx->complete(0); @@ -188,7 +188,7 @@ void InstanceReplayer::release_image(const std::string &global_image_id, auto image_replayer = it->second; m_image_replayers.erase(it); - on_finish = new FunctionContext( + on_finish = new LambdaContext( [image_replayer, on_finish] (int r) { image_replayer->destroy(); on_finish->complete(0); @@ -407,7 +407,7 @@ void InstanceReplayer::stop_image_replayer(ImageReplayer *image_replayer, m_async_op_tracker.start_op(); Context *ctx = create_async_context_callback( - m_threads->work_queue, new FunctionContext( + m_threads->work_queue, new LambdaContext( [this, image_replayer, on_finish] (int r) { stop_image_replayer(image_replayer, on_finish); m_async_op_tracker.finish_op(); @@ -419,7 +419,7 @@ void InstanceReplayer::stop_image_replayer(ImageReplayer *image_replayer, int after = 1; dout(10) << "scheduling image replayer " << image_replayer << " stop after " << after << " sec (task " << ctx << ")" << dendl; - ctx = new FunctionContext( + ctx = new LambdaContext( [this, after, ctx] (int r) { std::lock_guard timer_locker{m_threads->timer_lock}; m_threads->timer->add_event_after(after, ctx); @@ -506,7 +506,7 @@ void InstanceReplayer::schedule_image_state_check_task() { ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock)); ceph_assert(m_image_state_check_task == nullptr); - m_image_state_check_task = new FunctionContext( + m_image_state_check_task = new LambdaContext( [this](int r) { ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock)); m_image_state_check_task = nullptr; diff --git a/src/tools/rbd_mirror/InstanceWatcher.cc b/src/tools/rbd_mirror/InstanceWatcher.cc index 122e35326b60b..58253c8c52acf 100644 --- a/src/tools/rbd_mirror/InstanceWatcher.cc +++ b/src/tools/rbd_mirror/InstanceWatcher.cc @@ -513,7 +513,7 @@ void InstanceWatcher::notify_sync_start(const std::string &instance_id, bufferlist bl; encode(NotifyMessage{SyncStartPayload{request_id, sync_id}}, bl); - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [this, sync_id] (int r) { dout(10) << "finish: sync_id=" << sync_id << ", r=" << r << dendl; std::lock_guard locker{m_lock}; @@ -1040,7 +1040,7 @@ Context *InstanceWatcher::prepare_request(const std::string 
&instance_id, m_requests.erase(it); } else { ctx = create_async_context_callback( - m_work_queue, new FunctionContext( + m_work_queue, new LambdaContext( [this, instance_id, request_id] (int r) { complete_request(instance_id, request_id, r); })); @@ -1098,7 +1098,7 @@ void InstanceWatcher::handle_image_acquire( const std::string &global_image_id, Context *on_finish) { dout(10) << "global_image_id=" << global_image_id << dendl; - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [this, global_image_id, on_finish] (int r) { m_instance_replayer->acquire_image(this, global_image_id, on_finish); m_notify_op_tracker.finish_op(); @@ -1113,7 +1113,7 @@ void InstanceWatcher::handle_image_release( const std::string &global_image_id, Context *on_finish) { dout(10) << "global_image_id=" << global_image_id << dendl; - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [this, global_image_id, on_finish] (int r) { m_instance_replayer->release_image(global_image_id, on_finish); m_notify_op_tracker.finish_op(); @@ -1130,7 +1130,7 @@ void InstanceWatcher::handle_peer_image_removed( dout(10) << "global_image_id=" << global_image_id << ", " << "peer_mirror_uuid=" << peer_mirror_uuid << dendl; - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [this, peer_mirror_uuid, global_image_id, on_finish] (int r) { m_instance_replayer->remove_peer_image(global_image_id, peer_mirror_uuid, on_finish); @@ -1156,7 +1156,7 @@ void InstanceWatcher::handle_sync_request(const std::string &instance_id, } Context *on_start = create_async_context_callback( - m_work_queue, new FunctionContext( + m_work_queue, new LambdaContext( [this, instance_id, sync_id, on_finish] (int r) { dout(10) << "handle_sync_request: finish: instance_id=" << instance_id << ", sync_id=" << sync_id << ", r=" << r << dendl; diff --git a/src/tools/rbd_mirror/Instances.cc b/src/tools/rbd_mirror/Instances.cc index 13d678d7cea58..47c7de2a7ef5c 100644 --- a/src/tools/rbd_mirror/Instances.cc +++ b/src/tools/rbd_mirror/Instances.cc @@ -55,7 +55,7 @@ void Instances::shut_down(Context *on_finish) { ceph_assert(m_on_finish == nullptr); m_on_finish = on_finish; - Context *ctx = new FunctionContext( + Context *ctx = new LambdaContext( [this](int r) { std::scoped_lock locker{m_threads->timer_lock, m_lock}; cancel_remove_task(); @@ -255,7 +255,7 @@ void Instances::remove_instances(const Instances::clock_t::time_point& tim ceph_assert(!instance_ids.empty()); dout(10) << "instance_ids=" << instance_ids << dendl; - Context* ctx = new FunctionContext([this, instance_ids](int r) { + Context* ctx = new LambdaContext([this, instance_ids](int r) { handle_remove_instances(r, instance_ids); }); ctx = create_async_context_callback(m_threads->work_queue, ctx); @@ -337,7 +337,7 @@ void Instances::schedule_remove_task(const Instances::clock_t::time_point& dout(10) << dendl; // schedule a time to fire when the oldest instance should be removed - m_timer_task = new FunctionContext( + m_timer_task = new LambdaContext( [this, oldest_time](int r) { ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock)); std::lock_guard locker{m_lock}; diff --git a/src/tools/rbd_mirror/LeaderWatcher.cc b/src/tools/rbd_mirror/LeaderWatcher.cc index 4f35e1c0aebbd..0179555356002 100644 --- a/src/tools/rbd_mirror/LeaderWatcher.cc +++ b/src/tools/rbd_mirror/LeaderWatcher.cc @@ -235,7 +235,7 @@ void LeaderWatcher::handle_wait_for_tasks() { ceph_assert(!m_timer_op_tracker.empty()); m_timer_op_tracker.finish_op(); - auto ctx = new FunctionContext([this](int r) { + auto ctx 
= new LambdaContext([this](int r) { Context *on_finish; { // ensure lock isn't held when completing shut down @@ -350,7 +350,7 @@ void LeaderWatcher::schedule_timer_task(const std::string &name, cancel_timer_task(); - m_timer_task = new FunctionContext( + m_timer_task = new LambdaContext( [this, leader, timer_callback](int r) { ceph_assert(ceph_mutex_is_locked(m_threads->timer_lock)); m_timer_task = nullptr; @@ -582,7 +582,7 @@ void LeaderWatcher::handle_get_locker(int r, return; } - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [this](int r) { std::string instance_id; if (get_leader_instance_id(&instance_id)) { @@ -767,12 +767,12 @@ void LeaderWatcher::notify_listener() { LeaderWatcher, &LeaderWatcher::handle_notify_listener>(this)); if (is_leader(m_lock)) { - ctx = new FunctionContext( + ctx = new LambdaContext( [this, ctx](int r) { m_listener->post_acquire_handler(ctx); }); } else { - ctx = new FunctionContext( + ctx = new LambdaContext( [this, ctx](int r) { m_listener->pre_release_handler(ctx); }); diff --git a/src/tools/rbd_mirror/MirrorStatusWatcher.cc b/src/tools/rbd_mirror/MirrorStatusWatcher.cc index b935bc5cd0f09..219f3e302a3d6 100644 --- a/src/tools/rbd_mirror/MirrorStatusWatcher.cc +++ b/src/tools/rbd_mirror/MirrorStatusWatcher.cc @@ -32,7 +32,7 @@ template void MirrorStatusWatcher::init(Context *on_finish) { dout(20) << dendl; - on_finish = new FunctionContext( + on_finish = new LambdaContext( [this, on_finish] (int r) { if (r < 0) { derr << "error removing down statuses: " << cpp_strerror(r) << dendl; diff --git a/src/tools/rbd_mirror/NamespaceReplayer.cc b/src/tools/rbd_mirror/NamespaceReplayer.cc index b4c3a2f258017..fcc6bbd3d05ee 100644 --- a/src/tools/rbd_mirror/NamespaceReplayer.cc +++ b/src/tools/rbd_mirror/NamespaceReplayer.cc @@ -100,7 +100,7 @@ void NamespaceReplayer::shut_down(Context *on_finish) { } } - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [this] (int r) { std::lock_guard locker{m_lock}; stop_instance_replayer(); @@ -506,7 +506,7 @@ void NamespaceReplayer::init_image_map(Context *on_finish) { m_instance_watcher->get_instance_id(), m_image_map_listener)); - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [this, on_finish](int r) { handle_init_image_map(r, on_finish); }); @@ -519,7 +519,7 @@ void NamespaceReplayer::handle_init_image_map(int r, Context *on_finish) { dout(10) << "r=" << r << dendl; if (r < 0) { derr << "failed to init image map: " << cpp_strerror(r) << dendl; - on_finish = new FunctionContext([on_finish, r](int) { + on_finish = new LambdaContext([on_finish, r](int) { on_finish->complete(r); }); shut_down_image_map(on_finish); @@ -540,7 +540,7 @@ void NamespaceReplayer::init_local_pool_watcher(Context *on_finish) { // ensure the initial set of local images is up-to-date // after acquiring the leader role - auto ctx = new FunctionContext([this, on_finish](int r) { + auto ctx = new LambdaContext([this, on_finish](int r) { handle_init_local_pool_watcher(r, on_finish); }); m_local_pool_watcher->init(create_async_context_callback( @@ -553,7 +553,7 @@ void NamespaceReplayer::handle_init_local_pool_watcher( dout(10) << "r=" << r << dendl; if (r < 0) { derr << "failed to retrieve local images: " << cpp_strerror(r) << dendl; - on_finish = new FunctionContext([on_finish, r](int) { + on_finish = new LambdaContext([on_finish, r](int) { on_finish->complete(r); }); shut_down_pool_watchers(on_finish); @@ -572,7 +572,7 @@ void NamespaceReplayer::init_remote_pool_watcher(Context *on_finish) { 
m_remote_pool_watcher.reset(PoolWatcher::create( m_threads, m_remote_io_ctx, m_remote_pool_watcher_listener)); - auto ctx = new FunctionContext([this, on_finish](int r) { + auto ctx = new LambdaContext([this, on_finish](int r) { handle_init_remote_pool_watcher(r, on_finish); }); m_remote_pool_watcher->init(create_async_context_callback( @@ -591,7 +591,7 @@ void NamespaceReplayer::handle_init_remote_pool_watcher( dout(0) << "remote peer does not have mirroring configured" << dendl; } else if (r < 0) { derr << "failed to retrieve remote images: " << cpp_strerror(r) << dendl; - on_finish = new FunctionContext([on_finish, r](int) { + on_finish = new LambdaContext([on_finish, r](int) { on_finish->complete(r); }); shut_down_pool_watchers(on_finish); @@ -608,7 +608,7 @@ void NamespaceReplayer::init_image_deleter(Context *on_finish) { std::lock_guard locker{m_lock}; ceph_assert(!m_image_deleter); - on_finish = new FunctionContext([this, on_finish](int r) { + on_finish = new LambdaContext([this, on_finish](int r) { handle_init_image_deleter(r, on_finish); }); m_image_deleter.reset(ImageDeleter::create(m_local_io_ctx, m_threads, @@ -624,7 +624,7 @@ void NamespaceReplayer::handle_init_image_deleter( dout(10) << "r=" << r << dendl; if (r < 0) { derr << "failed to init image deleter: " << cpp_strerror(r) << dendl; - on_finish = new FunctionContext([on_finish, r](int) { + on_finish = new LambdaContext([on_finish, r](int) { on_finish->complete(r); }); shut_down_image_deleter(on_finish); @@ -640,7 +640,7 @@ void NamespaceReplayer::shut_down_image_deleter(Context* on_finish) { { std::lock_guard locker{m_lock}; if (m_image_deleter) { - Context *ctx = new FunctionContext([this, on_finish](int r) { + Context *ctx = new LambdaContext([this, on_finish](int r) { handle_shut_down_image_deleter(r, on_finish); }); ctx = create_async_context_callback(m_threads->work_queue, ctx); @@ -673,7 +673,7 @@ void NamespaceReplayer::shut_down_pool_watchers(Context *on_finish) { { std::lock_guard locker{m_lock}; if (m_local_pool_watcher) { - Context *ctx = new FunctionContext([this, on_finish](int r) { + Context *ctx = new LambdaContext([this, on_finish](int r) { handle_shut_down_pool_watchers(r, on_finish); }); ctx = create_async_context_callback(m_threads->work_queue, ctx); @@ -714,7 +714,7 @@ void NamespaceReplayer::shut_down_image_map(Context *on_finish) { std::lock_guard locker{m_lock}; if (m_image_map) { - on_finish = new FunctionContext( + on_finish = new LambdaContext( [this, on_finish](int r) { handle_shut_down_image_map(r, on_finish); }); diff --git a/src/tools/rbd_mirror/PoolReplayer.cc b/src/tools/rbd_mirror/PoolReplayer.cc index 571c22185f014..7abd1dd36791a 100644 --- a/src/tools/rbd_mirror/PoolReplayer.cc +++ b/src/tools/rbd_mirror/PoolReplayer.cc @@ -582,7 +582,7 @@ void PoolReplayer::update_namespace_replayers() { auto iter = mirroring_namespaces.find(it->first); if (iter == mirroring_namespaces.end()) { auto namespace_replayer = it->second; - auto on_shut_down = new FunctionContext( + auto on_shut_down = new LambdaContext( [this, namespace_replayer, ctx=gather_ctx->new_sub()](int r) { delete namespace_replayer; ctx->complete(r); @@ -601,7 +601,7 @@ void PoolReplayer::update_namespace_replayers() { m_threads, m_image_sync_throttler.get(), m_image_deletion_throttler.get(), m_service_daemon, m_cache_manager_handler); - auto on_init = new FunctionContext( + auto on_init = new LambdaContext( [this, namespace_replayer, name, &mirroring_namespaces, ctx=gather_ctx->new_sub()](int r) { if (r < 0) { @@ -699,7 +699,7 @@ 
void PoolReplayer::namespace_replayer_acquire_leader(const std::string &name, auto it = m_namespace_replayers.find(name); ceph_assert(it != m_namespace_replayers.end()); - on_finish = new FunctionContext( + on_finish = new LambdaContext( [this, name, on_finish](int r) { if (r < 0) { derr << "failed to handle acquire leader for namespace: " @@ -712,7 +712,7 @@ void PoolReplayer::namespace_replayer_acquire_leader(const std::string &name, auto namespace_replayer = m_namespace_replayers[name]; m_namespace_replayers.erase(name); - auto on_shut_down = new FunctionContext( + auto on_shut_down = new LambdaContext( [this, namespace_replayer, on_finish](int r) { delete namespace_replayer; on_finish->complete(r); @@ -896,7 +896,7 @@ void PoolReplayer::handle_post_acquire_leader(Context *on_finish) { m_service_daemon->add_or_update_attribute(m_local_pool_id, SERVICE_DAEMON_LEADER_KEY, true); - auto ctx = new FunctionContext( + auto ctx = new LambdaContext( [this, on_finish](int r) { if (r == 0) { std::lock_guard locker{m_lock}; diff --git a/src/tools/rbd_mirror/PoolReplayer.h b/src/tools/rbd_mirror/PoolReplayer.h index aacff90a24857..73bdb32d54c99 100644 --- a/src/tools/rbd_mirror/PoolReplayer.h +++ b/src/tools/rbd_mirror/PoolReplayer.h @@ -146,7 +146,7 @@ private: std::lock_guard locker{m_lock}; on_finish = librbd::util::create_async_context_callback( - m_threads->work_queue, new FunctionContext( + m_threads->work_queue, new LambdaContext( [this, on_finish](int r) { { std::lock_guard locker{m_lock}; @@ -163,7 +163,7 @@ private: on_finish->complete(r); })); - auto on_lock = new FunctionContext( + auto on_lock = new LambdaContext( [this, callback, on_finish](int) { std::lock_guard locker{m_lock}; ceph_assert(m_namespace_replayers_locked); diff --git a/src/tools/rbd_mirror/PoolWatcher.cc b/src/tools/rbd_mirror/PoolWatcher.cc index 0ee7be40557ff..4bc516f5ebacb 100644 --- a/src/tools/rbd_mirror/PoolWatcher.cc +++ b/src/tools/rbd_mirror/PoolWatcher.cc @@ -193,7 +193,7 @@ void PoolWatcher::unregister_watcher() { dout(5) << dendl; m_async_op_tracker.start_op(); - Context *ctx = new FunctionContext([this](int r) { + Context *ctx = new LambdaContext([this](int r) { dout(5) << "unregister_watcher: r=" << r << dendl; if (r < 0) { derr << "error unregistering watcher for " @@ -365,7 +365,7 @@ void PoolWatcher::schedule_refresh_images(double interval) { m_image_ids_invalid = true; m_timer_ctx = m_threads->timer->add_event_after( interval, - new FunctionContext([this](int r) { + new LambdaContext([this](int r) { process_refresh_images(); })); } @@ -427,7 +427,7 @@ void PoolWatcher::process_refresh_images() { // execute outside of the timer's lock m_async_op_tracker.start_op(); - Context *ctx = new FunctionContext([this](int r) { + Context *ctx = new LambdaContext([this](int r) { register_watcher(); m_async_op_tracker.finish_op(); }); @@ -445,7 +445,7 @@ void PoolWatcher::schedule_listener() { dout(20) << dendl; m_async_op_tracker.start_op(); - Context *ctx = new FunctionContext([this](int r) { + Context *ctx = new LambdaContext([this](int r) { notify_listener(); m_async_op_tracker.finish_op(); }); diff --git a/src/tools/rbd_mirror/ServiceDaemon.cc b/src/tools/rbd_mirror/ServiceDaemon.cc index b08c6effa90dd..19f69ea88de7f 100644 --- a/src/tools/rbd_mirror/ServiceDaemon.cc +++ b/src/tools/rbd_mirror/ServiceDaemon.cc @@ -197,7 +197,7 @@ void ServiceDaemon::schedule_update_status() { return; } - m_timer_ctx = new FunctionContext([this](int) { + m_timer_ctx = new LambdaContext([this](int) { m_timer_ctx = nullptr; 
update_status(); }); diff --git a/src/tools/rbd_mirror/image_deleter/SnapshotPurgeRequest.cc b/src/tools/rbd_mirror/image_deleter/SnapshotPurgeRequest.cc index 020db85b8770d..58ebd5e58dc78 100644 --- a/src/tools/rbd_mirror/image_deleter/SnapshotPurgeRequest.cc +++ b/src/tools/rbd_mirror/image_deleter/SnapshotPurgeRequest.cc @@ -159,7 +159,7 @@ void SnapshotPurgeRequest::snap_unprotect() { return; } - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_snap_unprotect(r); finish_op_ctx->complete(0); }); @@ -214,7 +214,7 @@ void SnapshotPurgeRequest::snap_remove() { return; } - auto ctx = new FunctionContext([this, finish_op_ctx](int r) { + auto ctx = new LambdaContext([this, finish_op_ctx](int r) { handle_snap_remove(r); finish_op_ctx->complete(0); }); diff --git a/src/tools/rbd_mirror/image_deleter/TrashWatcher.cc b/src/tools/rbd_mirror/image_deleter/TrashWatcher.cc index 6d551b56b20ec..e68cac9d5782d 100644 --- a/src/tools/rbd_mirror/image_deleter/TrashWatcher.cc +++ b/src/tools/rbd_mirror/image_deleter/TrashWatcher.cc @@ -70,7 +70,7 @@ void TrashWatcher::shut_down(Context *on_finish) { } } - auto ctx = new FunctionContext([this, on_finish](int r) { + auto ctx = new LambdaContext([this, on_finish](int r) { unregister_watcher(on_finish); }); m_async_op_tracker.wait_for_ops(ctx); @@ -222,7 +222,7 @@ void TrashWatcher::unregister_watcher(Context* on_finish) { dout(5) << dendl; m_async_op_tracker.start_op(); - Context *ctx = new FunctionContext([this, on_finish](int r) { + Context *ctx = new LambdaContext([this, on_finish](int r) { handle_unregister_watcher(r, on_finish); }); this->unregister_watch(ctx); @@ -327,7 +327,7 @@ void TrashWatcher::schedule_trash_list(double interval) { dout(5) << dendl; m_timer_ctx = m_threads->timer->add_event_after( interval, - new FunctionContext([this](int r) { + new LambdaContext([this](int r) { process_trash_list(); })); } @@ -348,7 +348,7 @@ void TrashWatcher::process_trash_list() { // execute outside of the timer's lock m_async_op_tracker.start_op(); - Context *ctx = new FunctionContext([this](int r) { + Context *ctx = new LambdaContext([this](int r) { create_trash(); m_async_op_tracker.finish_op(); }); @@ -368,7 +368,7 @@ void TrashWatcher::add_image(const std::string& image_id, << "deferment_end_time=" << deferment_end_time << dendl; m_async_op_tracker.start_op(); - auto ctx = new FunctionContext([this, image_id, deferment_end_time](int r) { + auto ctx = new LambdaContext([this, image_id, deferment_end_time](int r) { m_trash_listener.handle_trash_image(image_id, deferment_end_time.to_real_time()); m_async_op_tracker.finish_op(); diff --git a/src/tools/rbd_mirror/image_replayer/ReplayStatusFormatter.cc b/src/tools/rbd_mirror/image_replayer/ReplayStatusFormatter.cc index b0e90735933a2..947190100852c 100644 --- a/src/tools/rbd_mirror/image_replayer/ReplayStatusFormatter.cc +++ b/src/tools/rbd_mirror/image_replayer/ReplayStatusFormatter.cc @@ -170,7 +170,7 @@ void ReplayStatusFormatter::send_update_tag_cache(uint64_t master_tag_tid, dout(20) << "master_tag_tid=" << master_tag_tid << ", mirror_tag_tid=" << mirror_tag_tid << dendl; - FunctionContext *ctx = new FunctionContext( + auto ctx = new LambdaContext( [this, master_tag_tid, mirror_tag_tid](int r) { handle_update_tag_cache(master_tag_tid, mirror_tag_tid, r); }); -- 2.39.5
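
Every hunk above follows the same call-site shape: heap-allocate a context around a lambda that receives the completion code, then hand it to a timer, work queue, gather, or chained shutdown step. As a minimal, self-contained sketch of what such a generic wrapper could look like — simplified stand-ins only, not the actual interface in src/include/Context.h, and the names (Context, LambdaContext, buf, main) are purely illustrative here:

  // Sketch under stated assumptions: a stripped-down completion-callback
  // base and a template wrapper that stores the callable by value.
  #include <iostream>
  #include <memory>
  #include <type_traits>
  #include <utility>

  // Simplified base: complete(r) runs finish(r) once and deletes the object.
  struct Context {
    virtual ~Context() = default;
    virtual void finish(int r) = 0;
    void complete(int r) { finish(r); delete this; }
  };

  // Generic wrapper: the callable's concrete type is the template parameter,
  // so no type-erased copy is required to store it.
  template <typename F>
  class LambdaContext : public Context {
  public:
    explicit LambdaContext(F &&f) : f_(std::forward<F>(f)) {}
    void finish(int r) override { f_(r); }
  private:
    F f_;
  };

  // C++17 deduction guide so call sites can write: new LambdaContext([](int r) { ... })
  template <typename F>
  LambdaContext(F &&) -> LambdaContext<std::decay_t<F>>;

  int main() {
    // Copyable capture, matching most call sites in the hunks above.
    Context *ctx = new LambdaContext([](int r) {
      std::cout << "finished with r=" << r << "\n";
    });
    ctx->complete(0);

    // Move-only capture: storing the callable type directly (rather than in a
    // copy-requiring type-erased wrapper such as std::function) is what makes
    // captures like this possible in general.
    auto buf = std::make_unique<int>(42);
    Context *ctx2 = new LambdaContext([buf = std::move(buf)](int r) {
      std::cout << "buffer=" << *buf << ", r=" << r << "\n";
    });
    ctx2->complete(0);
    return 0;
  }

In this sketch, as at the call sites in the diff, the wrapper is always heap-allocated and completed exactly once; complete() deleting the object is what makes the one-shot, chained usage (each context completing or scheduling the next) safe.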