public:
using condition = InterruptCond;
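+ // Exposes the address of the current interruption condition for logging; callers only print the pointer, never dereference it.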
+ static const void *get_interrupt_cond() {
+ return (const void*)interrupt_cond<InterruptCond>.interrupt_cond.get();
+ }
+
template <typename FutureType>
[[gnu::always_inline]]
static interruptible_future_detail<InterruptCond, FutureType>
LOCAL_LOGGER.log(level_, "{}: " MSG, FNAME , ##__VA_ARGS__)
#define SUBLOG(subname_, level_, MSG, ...) \
LOGGER(subname_).log(level_, "{}: " MSG, FNAME , ##__VA_ARGS__)
+#define LOGI(level_, MSG, ...) \
+ LOCAL_LOGGER.log(level_, "{} {}: " MSG, \
+ interruptor::get_interrupt_cond(), FNAME , ##__VA_ARGS__)
+#define SUBLOGI(subname_, level_, MSG, ...) \
+ LOGGER(subname_).log(level_, "{} {}: " MSG, \
+ interruptor::get_interrupt_cond(), FNAME , ##__VA_ARGS__)
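+// Example (hypothetical): in a scope that defines an `interruptor` alias, after
+// LOG_PREFIX(PG::do_op), LOGI(seastar::log_level::debug, "processing {}", oid)
+// would log "<interrupt_cond address> PG::do_op: processing <oid>".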
#define TRACE(...) LOG(seastar::log_level::trace, __VA_ARGS__)
+#define TRACEI(...) LOGI(seastar::log_level::trace, __VA_ARGS__)
#define SUBTRACE(subname_, ...) SUBLOG(subname_, seastar::log_level::trace, __VA_ARGS__)
+#define SUBTRACEI(subname_, ...) SUBLOGI(subname_, seastar::log_level::trace, __VA_ARGS__)
#define DEBUG(...) LOG(seastar::log_level::debug, __VA_ARGS__)
+#define DEBUGI(...) LOGI(seastar::log_level::debug, __VA_ARGS__)
#define SUBDEBUG(subname_, ...) SUBLOG(subname_, seastar::log_level::debug, __VA_ARGS__)
+#define SUBDEBUGI(subname_, ...) SUBLOGI(subname_, seastar::log_level::debug, __VA_ARGS__)
#define INFO(...) LOG(seastar::log_level::info, __VA_ARGS__)
+#define INFOI(...) LOGI(seastar::log_level::info, __VA_ARGS__)
#define SUBINFO(subname_, ...) SUBLOG(subname_, seastar::log_level::info, __VA_ARGS__)
+#define SUBINFOI(subname_, ...) SUBLOGI(subname_, seastar::log_level::info, __VA_ARGS__)
#define WARN(...) LOG(seastar::log_level::warn, __VA_ARGS__)
+#define WARNI(...) LOGI(seastar::log_level::warn, __VA_ARGS__)
#define SUBWARN(subname_, ...) SUBLOG(subname_, seastar::log_level::warn, __VA_ARGS__)
+#define SUBWARNI(subname_, ...) SUBLOGI(subname_, seastar::log_level::warn, __VA_ARGS__)
#define ERROR(...) LOG(seastar::log_level::error, __VA_ARGS__)
+#define ERRORI(...) LOGI(seastar::log_level::error, __VA_ARGS__)
#define SUBERROR(subname_, ...) SUBLOG(subname_, seastar::log_level::error, __VA_ARGS__)
+#define SUBERRORI(subname_, ...) SUBLOGI(subname_, seastar::log_level::error, __VA_ARGS__)
// *DPP macros are intended to take DoutPrefixProvider implementations, but anything with
// an operator<< will work as a prefix
#define SUBLOGDPP(subname_, level_, MSG, dpp, ...) \
LOGGER(subname_).log(level_, "{} {}: " MSG, dpp, FNAME , ##__VA_ARGS__)
+#define SUBLOGDPPI(subname_, level_, MSG, dpp, ...) \
+  LOGGER(subname_).log(level_, "{} {} {}: " MSG, \
+    interruptor::get_interrupt_cond(), dpp, FNAME , ##__VA_ARGS__)
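+// The *DPPI variants prepend the interrupt condition, then the dpp prefix, then
+// FNAME. Example (hypothetical): SUBLOGDPPI(osd, seastar::log_level::debug,
+// "requeueing {}", *pg, op) logs "<cond> <pg> <FNAME>: requeueing <op>".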
#define SUBTRACEDPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::trace, __VA_ARGS__)
+#define SUBTRACEDPPI(subname_, ...) SUBLOGDPPI(subname_, seastar::log_level::trace, __VA_ARGS__)
#define SUBDEBUGDPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::debug, __VA_ARGS__)
+#define SUBDEBUGDPPI(subname_, ...) SUBLOGDPPI(subname_, seastar::log_level::debug, __VA_ARGS__)
#define SUBINFODPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::info, __VA_ARGS__)
+#define SUBINFODPPI(subname_, ...) SUBLOGDPPI(subname_, seastar::log_level::info, __VA_ARGS__)
#define SUBWARNDPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::warn, __VA_ARGS__)
+#define SUBWARNDPPI(subname_, ...) SUBLOGDPPI(subname_, seastar::log_level::warn, __VA_ARGS__)
#define SUBERRORDPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::error, __VA_ARGS__)
+#define SUBERRORDPPI(subname_, ...) SUBLOGDPPI(subname_, seastar::log_level::error, __VA_ARGS__)
#define LOGDPP(level_, MSG, dpp, ...) \
LOCAL_LOGGER.log(level_, "{} {}: " MSG, dpp, FNAME , ##__VA_ARGS__)
+#define LOGDPPI(level_, MSG, dpp, ...) \
+  LOCAL_LOGGER.log(level_, "{} {} {}: " MSG, \
+    interruptor::get_interrupt_cond(), dpp, FNAME , ##__VA_ARGS__)
#define TRACEDPP(...) LOGDPP(seastar::log_level::trace, __VA_ARGS__)
+#define TRACEDPPI(...) LOGDPPI(seastar::log_level::trace, __VA_ARGS__)
#define DEBUGDPP(...) LOGDPP(seastar::log_level::debug, __VA_ARGS__)
+#define DEBUGDPPI(...) LOGDPPI(seastar::log_level::debug, __VA_ARGS__)
#define INFODPP(...) LOGDPP(seastar::log_level::info, __VA_ARGS__)
+#define INFODPPI(...) LOGDPPI(seastar::log_level::info, __VA_ARGS__)
#define WARNDPP(...) LOGDPP(seastar::log_level::warn, __VA_ARGS__)
+#define WARNDPPI(...) LOGDPPI(seastar::log_level::warn, __VA_ARGS__)
#define ERRORDPP(...) LOGDPP(seastar::log_level::error, __VA_ARGS__)
+#define ERRORDPPI(...) LOGDPPI(seastar::log_level::error, __VA_ARGS__)
};
}
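+// SET_SUBSYS(osd) points LOCAL_LOGGER (and the LOG*/LOGI macro family) at the
+// osd subsystem logger for this file.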
+SET_SUBSYS(osd);
+
namespace crimson::osd {
template <class T>
seastar::future<> BackgroundRecoveryT<T>::start()
{
- logger().debug("{}: start", *this);
-
typename T::IRef ref = static_cast<T*>(this);
+ using interruptor = typename T::interruptor;
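+  // (the *I logging macros expand to interruptor::get_interrupt_cond(), so the alias must be in scope)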
+
+ LOG_PREFIX(BackgroundRecoveryT<T>::start);
+ DEBUGDPPI("{}: start", *pg, *this);
auto maybe_delay = seastar::now();
if (delay) {
maybe_delay = seastar::sleep(
return ss.with_throttle_while(
std::move(trigger),
this, get_scheduler_params(), [this] {
- return T::interruptor::with_interruption([this] {
+ return interruptor::with_interruption([this] {
return do_recovery();
}, [](std::exception_ptr) {
return seastar::make_ready_future<bool>(false);
}, pg);
}).handle_exception_type([ref, this](const std::system_error& err) {
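+    // LOG_PREFIX is scoped to the enclosing function or lambda, so each logging lambda declares its own.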
+ LOG_PREFIX(BackgroundRecoveryT<T>::start);
if (err.code() == std::make_error_code(std::errc::interrupted)) {
- logger().debug("{} recovery interruped: {}", *pg, err.what());
+ DEBUGDPPI("recovery interruped: {}", *pg, err.what());
return seastar::now();
}
return seastar::make_exception_future<>(err);
UrgentRecovery::interruptible_future<bool>
UrgentRecovery::do_recovery()
{
- logger().debug("{}: {}", __func__, *this);
+ LOG_PREFIX(UrgentRecovery::do_recovery);
+ DEBUGDPPI("{}: {}", *pg, __func__, *this);
if (!pg->has_reset_since(epoch_started)) {
return with_blocking_event<RecoveryBackend::RecoveryBlockingEvent,
interruptor>([this] (auto&& trigger) {
BackfillRecovery::interruptible_future<bool>
BackfillRecovery::do_recovery()
{
- logger().debug("{}", __func__);
+ LOG_PREFIX(BackfillRecovery::do_recovery);
+ DEBUGDPPI("{}", *pg, __func__);
if (pg->has_reset_since(epoch_started)) {
- logger().debug("{}: pg got reset since epoch_started={}",
- __func__, epoch_started);
+ DEBUGDPPI("{}: pg got reset since epoch_started={}",
+ *pg, __func__, epoch_started);
return seastar::make_ready_future<bool>(false);
}
// TODO: limits
}
}
+SET_SUBSYS(osd);
+
namespace crimson::osd {
void ClientRequest::Orderer::requeue(
ShardServices &shard_services, Ref<PG> pg)
{
+ LOG_PREFIX(ClientRequest::Orderer::requeue);
for (auto &req: list) {
- logger().debug("{}: {} requeueing {}", __func__, *pg, req);
+ DEBUGI("{}: {} requeueing {}", __func__, *pg, req);
req.reset_instance_handle();
std::ignore = req.with_pg_int(shard_services, pg);
}
void ClientRequest::Orderer::clear_and_cancel()
{
+ LOG_PREFIX(ClientRequest::Orderer::clear_and_cancel);
for (auto i = list.begin(); i != list.end(); ) {
-    logger().debug(
-      "ClientRequest::Orderer::clear_and_cancel: {}",
-      *i);
+    DEBUGI("{}", *i);
i->complete_request();
ClientRequest::~ClientRequest()
{
- logger().debug("{}: destroying", *this);
+ LOG_PREFIX(ClientRequest::~ClientRequest);
+ DEBUGI("{}: destroying", *this);
}
void ClientRequest::print(std::ostream &lhs) const
void ClientRequest::dump_detail(Formatter *f) const
{
- logger().debug("{}: dumping", *this);
+ LOG_PREFIX(ClientRequest::dump_detail);
+ DEBUGI("{}: dumping", *this);
std::apply([f] (auto... event) {
(..., event.dump(f));
}, tracking_events);
seastar::future<> ClientRequest::with_pg_int(
ShardServices &shard_services, Ref<PG> pgref)
{
+ LOG_PREFIX(ClientRequest::with_pg_int);
epoch_t same_interval_since = pgref->get_interval_start_epoch();
- logger().debug("{} same_interval_since: {}", *this, same_interval_since);
+ DEBUGI("{} same_interval_since: {}", *this, same_interval_since);
if (m->finish_decode()) {
m->clear_payload();
}
auto &ihref = *instance_handle;
return interruptor::with_interruption(
[this, pgref, this_instance_id, &ihref, &shard_services]() mutable {
+ LOG_PREFIX(ClientRequest::with_pg_int);
+ DEBUGI("{} start", *this);
PG &pg = *pgref;
if (pg.can_discard_op(*m)) {
return shard_services.send_incremental_map(
std::ref(*conn), m->get_map_epoch()
).then([this, this_instance_id, pgref] {
- logger().debug("{}.{}: discarding", *this, this_instance_id);
+ LOG_PREFIX(ClientRequest::with_pg_int);
+ DEBUGI("{}.{}: discarding", *this, this_instance_id);
pgref->client_request_orderer.remove_request(*this);
complete_request();
return interruptor::now();
}
return ihref.enter_stage<interruptor>(client_pp(pg).await_map, *this
).then_interruptible([this, this_instance_id, &pg, &ihref] {
- logger().debug("{}.{}: after await_map stage", *this, this_instance_id);
+ LOG_PREFIX(ClientRequest::with_pg_int);
+ DEBUGI("{}.{}: after await_map stage", *this, this_instance_id);
return ihref.enter_blocker(
*this, pg.osdmap_gate, &decltype(pg.osdmap_gate)::wait_for_map,
m->get_min_epoch(), nullptr);
}).then_interruptible([this, this_instance_id, &pg, &ihref](auto map) {
- logger().debug("{}.{}: after wait_for_map", *this, this_instance_id);
+ LOG_PREFIX(ClientRequest::with_pg_int);
+ DEBUGI("{}.{}: after wait_for_map", *this, this_instance_id);
return ihref.enter_stage<interruptor>(client_pp(pg).wait_for_active, *this);
}).then_interruptible([this, this_instance_id, &pg, &ihref]() {
- logger().debug(
- "{}.{}: after wait_for_active stage", *this, this_instance_id);
+ LOG_PREFIX(ClientRequest::with_pg_int);
+ DEBUGI("{}.{}: after wait_for_active stage", *this, this_instance_id);
return ihref.enter_blocker(
*this,
pg.wait_for_active_blocker,
&decltype(pg.wait_for_active_blocker)::wait);
}).then_interruptible([this, pgref, this_instance_id, &ihref]() mutable
-> interruptible_future<> {
- logger().debug(
- "{}.{}: after wait_for_active", *this, this_instance_id);
+ LOG_PREFIX(ClientRequest::with_pg_int);
+ DEBUGI("{}.{}: after wait_for_active", *this, this_instance_id);
if (is_pg_op()) {
return process_pg_op(pgref);
} else {
logger().debug("{}.{}: complete", *this, this_instance_id);
return ihref.handle.complete();
}).then_interruptible([this, this_instance_id, pgref] {
- logger().debug("{}.{}: after process*", *this, this_instance_id);
+ LOG_PREFIX(ClientRequest::with_pg_int);
+ DEBUGI("{}.{}: after process*", *this, this_instance_id);
pgref->client_request_orderer.remove_request(*this);
complete_request();
});
}, [this, this_instance_id, pgref](std::exception_ptr eptr) {
+ LOG_PREFIX(ClientRequest::with_pg_int);
// TODO: better debug output
- logger().debug("{}.{}: interrupted {}", *this, this_instance_id, eptr);
+ DEBUGI("{}.{}: interrupted {}", *this, this_instance_id, eptr);
},
pgref
).finally(
auto ClientRequest::reply_op_error(const Ref<PG>& pg, int err)
{
- logger().debug("{}: replying with error {}", *this, err);
+ LOG_PREFIX(ClientRequest::reply_op_error);
+ DEBUGI("{}: replying with error {}", *this, err);
auto reply = crimson::make_message<MOSDOpReply>(
m.get(), err, pg->get_osdmap_epoch(),
m->get_flags() & (CEPH_OSD_FLAG_ACK|CEPH_OSD_FLAG_ONDISK),
*this
).then_interruptible(
[this, pg]() mutable {
+ LOG_PREFIX(ClientRequest::process_op);
if (pg->is_primary()) {
return do_recover_missing(pg, m->get_hobj());
} else {
- logger().debug("process_op: Skipping do_recover_missing"
+ DEBUGI("process_op: Skipping do_recover_missing"
"on non primary pg");
return interruptor::now();
}
return ihref.enter_stage<interruptor>(client_pp(*pg).get_obc, *this
).then_interruptible(
[this, pg, &ihref]() mutable -> PG::load_obc_iertr::future<> {
- logger().debug("{}: in get_obc stage", *this);
+ LOG_PREFIX(ClientRequest::process_op);
+ DEBUGI("{}: in get_obc stage", *this);
op_info.set_from_op(&*m, *pg->get_osdmap());
return pg->with_locked_obc(
m->get_hobj(), op_info,
[this, pg, &ihref](auto obc) mutable {
- logger().debug("{}: got obc {}", *this, obc->obs);
+ LOG_PREFIX(ClientRequest::process_op);
+ DEBUGI("{}: got obc {}", *this, obc->obs);
return ihref.enter_stage<interruptor>(
client_pp(*pg).process, *this
).then_interruptible([this, pg, obc, &ihref]() mutable {
});
}).handle_error_interruptible(
PG::load_obc_ertr::all_same_way([this, pg=std::move(pg)](const auto &code) {
- logger().error("ClientRequest saw error code {}", code);
+ LOG_PREFIX(ClientRequest::process_op);
+ ERRORI("ClientRequest saw error code {}", code);
assert(code.value() > 0);
return reply_op_error(pg, -code.value());
}));
instance_handle_t &ihref,
Ref<PG>& pg, crimson::osd::ObjectContextRef obc)
{
+ LOG_PREFIX(ClientRequest::do_process);
if (m->has_flag(CEPH_OSD_FLAG_PARALLELEXEC)) {
return reply_op_error(pg, -EINVAL);
}
if (pool.has_flag(pg_pool_t::FLAG_EIO)) {
// drop op on the floor; the client will handle returning EIO
if (m->has_flag(CEPH_OSD_FLAG_SUPPORTSPOOLEIO)) {
- logger().debug("discarding op due to pool EIO flag");
+ DEBUGI("discarding op due to pool EIO flag");
return seastar::now();
} else {
- logger().debug("replying EIO due to pool EIO flag");
+ DEBUGI("replying EIO due to pool EIO flag");
return reply_op_error(pg, -EIO);
}
}
if ((m->has_flag(CEPH_OSD_FLAG_ORDERSNAP)) &&
snapc.seq < obc->ssc->snapset.seq) {
- logger().debug("{} ORDERSNAP flag set and snapc seq {}",
+ DEBUGI("{} ORDERSNAP flag set and snapc seq {}",
" < snapset seq {} on {}",
__func__, snapc.seq, obc->ssc->snapset.seq,
obc->obs.oi.soid);
if (!pg->is_primary()) {
// primary can handle both normal ops and balanced reads
if (is_misdirected(*pg)) {
- logger().trace("do_process: dropping misdirected op");
+ TRACEI("do_process: dropping misdirected op");
return seastar::now();
} else if (const hobject_t& hoid = m->get_hobj();
!pg->get_peering_state().can_serve_replica_read(hoid)) {
- logger().debug("{}: unstable write on replica, "
+ DEBUGI("{}: unstable write on replica, "
"bouncing to primary",
__func__);
return reply_op_error(pg, -EAGAIN);
} else {
- logger().debug("{}: serving replica read on oid {}",
+ DEBUGI("{}: serving replica read on oid {}",
__func__, m->get_hobj());
}
}
return ihref.enter_stage<interruptor>(client_pp(*pg).send_reply, *this
).then_interruptible(
[this, reply=std::move(reply)]() mutable {
- logger().debug("{}: sending response", *this);
+ LOG_PREFIX(ClientRequest::do_process);
+ DEBUGI("{}: sending response", *this);
return conn->send(std::move(reply));
});
}, crimson::ct_error::eagain::handle([this, pg, &ihref]() mutable {
Ref<PG>& pg,
crimson::osd::ObjectContextRef obc) const
{
+ LOG_PREFIX(ClientRequest::get_snapc);
SnapContext snapc;
if (op_info.may_write() || op_info.may_cache()) {
// snap
if (pg->get_pgpool().info.is_pool_snaps_mode()) {
// use pool's snapc
snapc = pg->get_pgpool().snapc;
- logger().debug("{} using pool's snapc snaps={}",
+ DEBUGI("{} using pool's snapc snaps={}",
__func__, snapc.snaps);
} else {
// client specified snapc
snapc.seq = m->get_snap_seq();
snapc.snaps = m->get_snaps();
- logger().debug("{} client specified snapc seq={} snaps={}",
+ DEBUGI("{} client specified snapc seq={} snaps={}",
__func__, snapc.seq, snapc.snaps);
}
}
};
}
+SET_SUBSYS(osd);
namespace crimson::osd {
InternalClientRequest::~InternalClientRequest()
{
- logger().debug("{}: destroying", *this);
+ LOG_PREFIX(InternalClientRequest::~InternalClientRequest);
+ DEBUGI("{}: destroying", *this);
}
void InternalClientRequest::print(std::ostream &) const
track_event<StartEvent>();
return crimson::common::handle_system_shutdown([this] {
return seastar::repeat([this] {
- logger().debug("{}: in repeat", *this);
+ LOG_PREFIX(InternalClientRequest::start);
+ DEBUGI("{}: in repeat", *this);
return interruptor::with_interruption([this]() mutable {
return enter_stage<interruptor>(
client_pp().wait_for_active
return enter_stage<interruptor>(
client_pp().get_obc);
}).then_interruptible([this] () -> PG::load_obc_iertr::future<> {
- logger().debug("{}: getting obc lock", *this);
+ LOG_PREFIX(InternalClientRequest::start);
+ DEBUGI("{}: getting obc lock", *this);
return seastar::do_with(create_osd_ops(),
[this](auto& osd_ops) mutable {
- logger().debug("InternalClientRequest: got {} OSDOps to execute",
+ LOG_PREFIX(InternalClientRequest::start);
+ DEBUGI("InternalClientRequest: got {} OSDOps to execute",
std::size(osd_ops));
[[maybe_unused]] const int ret = op_info.set_from_op(
std::as_const(osd_ops), pg->get_pgid().pgid, *pg->get_osdmap());
}
}
+SET_SUBSYS(osd);
+
namespace crimson::osd {
LogMissingRequest::LogMissingRequest(crimson::net::ConnectionRef&& conn,
seastar::future<> LogMissingRequest::with_pg(
ShardServices &shard_services, Ref<PG> pg)
{
- logger().debug("{}: LogMissingRequest::with_pg", *this);
+ LOG_PREFIX(LogMissingRequest::with_pg);
+ DEBUGI("{}: LogMissingRequest::with_pg", *this);
IRef ref = this;
return interruptor::with_interruption([this, pg] {
- logger().debug("{}: pg present", *this);
+ LOG_PREFIX(LogMissingRequest::with_pg);
+ DEBUGI("{}: pg present", *this);
return this->template enter_stage<interruptor>(client_pp(*pg).await_map
).then_interruptible([this, pg] {
return this->template with_blocking_event<
}
}
+SET_SUBSYS(osd);
+
namespace crimson::osd {
template <class T>
seastar::future<> PeeringEvent<T>::with_pg(
ShardServices &shard_services, Ref<PG> pg)
{
+ using interruptor = typename T::interruptor;
+ LOG_PREFIX(PeeringEvent<T>::with_pg);
if (!pg) {
- logger().warn("{}: pg absent, did not create", *this);
+ WARNI("{}: pg absent, did not create", *this);
on_pg_absent(shard_services);
that()->get_handle().exit();
return complete_rctx_no_pg(shard_services);
}
+ DEBUGI("start");
- using interruptor = typename T::interruptor;
return interruptor::with_interruption([this, pg, &shard_services] {
- logger().debug("{}: pg present", *this);
+ LOG_PREFIX(PeeringEvent<T>::with_pg);
+ DEBUGI("{} {}: pg present", interruptor::get_interrupt_cond(), *this);
return this->template enter_stage<interruptor>(peering_pp(*pg).await_map
).then_interruptible([this, pg] {
return this->template with_blocking_event<
return shard_services.send_pg_temp();
});
}, [this](std::exception_ptr ep) {
- logger().debug("{}: interrupted with {}", *this, ep);
+ LOG_PREFIX(PeeringEvent<T>::with_pg);
+ DEBUGI("{}: interrupted with {}", *this, ep);
}, pg).finally([this] {
logger().debug("{}: exit", *this);
that()->get_handle().exit();
template <class T>
void PeeringEvent<T>::on_pg_absent(ShardServices &)
{
- logger().debug("{}: pg absent, dropping", *this);
+ using interruptor = typename T::interruptor;
+ LOG_PREFIX(PeeringEvent<T>::on_pg_absent);
+ DEBUGI("{}: pg absent, dropping", *this);
}
template <class T>
typename PeeringEvent<T>::template interruptible_future<>
PeeringEvent<T>::complete_rctx(ShardServices &shard_services, Ref<PG> pg)
{
- logger().debug("{}: submitting ctx", *this);
+ using interruptor = typename T::interruptor;
+ LOG_PREFIX(PeeringEvent<T>::complete_rctx);
+ DEBUGI("{}: submitting ctx", *this);
return shard_services.dispatch_context(
pg->get_collection_ref(),
std::move(ctx));
seastar::future<> LocalPeeringEvent::start()
{
- logger().debug("{}: start", *this);
+ LOG_PREFIX(LocalPeeringEvent::start);
+ DEBUGI("{}: start", *this);
IRef ref = this;
auto maybe_delay = seastar::now();
return maybe_delay.then([this] {
return with_pg(pg->get_shard_services(), pg);
}).finally([ref=std::move(ref)] {
- logger().debug("{}: complete", *ref);
+ LOG_PREFIX(LocalPeeringEvent::start);
+ DEBUGI("{}: complete", *ref);
});
}
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
#include <fmt/format.h>
#include <fmt/ostream.h>
};
}
+SET_SUBSYS(osd);
+
namespace crimson::osd {
seastar::future<> RecoverySubRequest::with_pg(
ShardServices &shard_services, Ref<PG> pgref)
{
- logger().debug("{}: {}", "RecoverySubRequest::with_pg", *this);
-
track_event<StartEvent>();
IRef opref = this;
return interruptor::with_interruption([this, pgref] {
+ LOG_PREFIX(RecoverySubRequest::with_pg);
+ DEBUGI("{}: {}", "RecoverySubRequest::with_pg", *this);
return pgref->get_recovery_backend()->handle_recovery_op(m, conn
).then_interruptible([this] {
- logger().debug("{}: complete", *this);
+ LOG_PREFIX(RecoverySubRequest::with_pg);
+ DEBUGI("{}: complete", *this);
return handle.complete();
});
}, [](std::exception_ptr) {
}
}
+SET_SUBSYS(osd);
+
namespace crimson::osd {
RepRequest::RepRequest(crimson::net::ConnectionRef&& conn,
seastar::future<> RepRequest::with_pg(
ShardServices &shard_services, Ref<PG> pg)
{
- logger().debug("{}: RepRequest::with_pg", *this);
+ LOG_PREFIX(RepRequest::with_pg);
+ DEBUGI("{}: RepRequest::with_pg", *this);
IRef ref = this;
return interruptor::with_interruption([this, pg] {
- logger().debug("{}: pg present", *this);
+ LOG_PREFIX(RepRequest::with_pg);
+ DEBUGI("{}: pg present", *this);
return this->template enter_stage<interruptor>(client_pp(*pg).await_map
).then_interruptible([this, pg] {
return this->template with_blocking_event<