SET_SUBSYS(osd);
-namespace {
- seastar::logger& logger() {
- return crimson::get_logger(ceph_subsys_osd);
- }
-}
-
using std::map;
using std::set;
using PglogBasedRecovery = crimson::osd::PglogBasedRecovery;
size_t max_to_start)
{
LOG_PREFIX(PGRecovery::start_recovery_ops);
+ DEBUGDPP("{} max {}", pg->get_dpp(), recover_op, max_to_start);
assert(pg->is_primary());
assert(pg->is_peered());
if (pg->has_reset_since(recover_op.get_epoch_started()) ||
recover_op.is_cancelled()) {
- DEBUGDPP("recovery {} cancelled.", pg->get_pgid(), recover_op);
+ DEBUGDPP("recovery {} cancelled.", pg->get_dpp(), recover_op);
co_return seastar::stop_iteration::yes;
}
ceph_assert(pg->is_recovering());
//TODO: maybe we should implement a recovery race interruptor in the future
if (pg->has_reset_since(recover_op.get_epoch_started()) ||
recover_op.is_cancelled()) {
- DEBUGDPP("recovery {} cancelled.", pg->get_pgid(), recover_op);
+ DEBUGDPP("recovery {} cancelled.", pg->get_dpp(), recover_op);
co_return seastar::stop_iteration::yes;
}
ceph_assert(pg->is_recovering());
size_t max_to_start,
std::vector<PGRecovery::interruptible_future<>> *out)
{
+ LOG_PREFIX(PGRecovery::start_primary_recovery_ops);
if (!pg->is_recovering()) {
return 0;
}
const auto &missing = pg->get_peering_state().get_pg_log().get_missing();
- logger().info("{} recovering {} in pg {}, missing {}", __func__,
- pg->get_recovery_backend()->total_recovering(),
- *static_cast<crimson::osd::PG*>(pg),
- missing);
+ DEBUGDPP("recovering {} in pg {}, missing {}",
+ pg->get_dpp(),
+ pg->get_recovery_backend()->total_recovering(),
+ *static_cast<crimson::osd::PG*>(pg),
+ missing);
unsigned started = 0;
int skipped = 0;
hobject_t head = soid.get_head();
if (pg->get_peering_state().get_missing_loc().is_unfound(soid)) {
- logger().debug("{}: object {} unfound", __func__, soid);
+ DEBUGDPP("object {} unfound", pg->get_dpp(), soid);
++skipped;
++p;
continue;
}
if (pg->get_peering_state().get_missing_loc().is_unfound(head)) {
- logger().debug("{}: head object {} unfound", __func__, soid);
+ DEBUGDPP("head object {} unfound", pg->get_dpp(), soid);
++skipped;
++p;
continue;
++p;
bool head_missing = missing.is_missing(head);
- logger().info(
- "{} {} item.need {} {} {} {} {}",
- __func__,
+ DEBUGDPP(
+ "{} item.need {} {} {} {} {}",
+ pg->get_dpp(),
soid,
item.need,
missing.is_missing(soid) ? " (missing)":"",
pg->get_peering_state().set_last_requested(v);
}
- logger().info("{} started {} skipped {}", __func__, started, skipped);
+ DEBUGDPP("started {} skipped {}", pg->get_dpp(), started, skipped);
return started;
}
size_t max_to_start,
std::vector<PGRecovery::interruptible_future<>> *out)
{
+ LOG_PREFIX(PGRecovery::start_replica_recovery_ops);
if (!pg->is_recovering()) {
return 0;
}
assert(peer != pg->get_peering_state().get_primary());
const auto& pm = pg->get_peering_state().get_peer_missing(peer);
- logger().debug("{}: peer osd.{} missing {} objects", __func__,
- peer, pm.num_missing());
- logger().trace("{}: peer osd.{} missing {}", __func__,
- peer, pm.get_items());
+ DEBUGDPP("peer osd.{} missing {} objects", pg->get_dpp(), peer, pm.num_missing());
+ TRACEDPP("peer osd.{} missing {}", pg->get_dpp(), peer, pm.get_items());
// recover oldest first
for (auto p = pm.get_rmissing().begin();
const auto &soid = p->second;
if (pg->get_peering_state().get_missing_loc().is_unfound(soid)) {
- logger().debug("{}: object {} still unfound", __func__, soid);
+ DEBUGDPP("object {} still unfound", pg->get_dpp(), soid);
continue;
}
const pg_info_t &pi = pg->get_peering_state().get_peer_info(peer);
if (soid > pi.last_backfill) {
if (!pg->get_recovery_backend()->is_recovering(soid)) {
- logger().error(
- "{}: object {} in missing set for backfill (last_backfill {})"
+ ERRORDPP(
+ "object {} in missing set for backfill (last_backfill {})"
" but not in recovering",
- __func__,
+ pg->get_dpp(),
soid,
pi.last_backfill);
- ceph_abort();
+ ceph_abort_msg("start_replica_recovery_ops error");
}
continue;
}
if (pg->get_recovery_backend()->is_recovering(soid)) {
- logger().debug("{}: already recovering object {}", __func__, soid);
+ DEBUGDPP("already recovering object {}", pg->get_dpp(), soid);
auto& recovery_waiter = pg->get_recovery_backend()->get_recovering(soid);
out->emplace_back(recovery_waiter.wait_for_recovered(trigger));
started++;
}
if (pg->get_peering_state().get_missing_loc().is_deleted(soid)) {
- logger().debug("{}: soid {} is a delete, removing", __func__, soid);
+ DEBUGDPP("soid {} is a delete, removing", pg->get_dpp(), soid);
map<hobject_t,pg_missing_item>::const_iterator r =
pm.get_items().find(soid);
started++;
if (soid.is_snap() &&
pg->get_peering_state().get_pg_log().get_missing().is_missing(
soid.get_head())) {
- logger().debug("{}: head {} still missing on primary", __func__,
- soid.get_head());
+ DEBUGDPP("head {} still missing on primary", pg->get_dpp(), soid.get_head());
continue;
}
if (pg->get_peering_state().get_pg_log().get_missing().is_missing(soid)) {
- logger().debug("{}: soid {} still missing on primary", __func__, soid);
+ DEBUGDPP("soid {} still missing on primary", pg->get_dpp(), soid);
continue;
}
- logger().debug("{}: recover_object_replicas({})", __func__,soid);
+ DEBUGDPP("recover_object_replicas({})", pg->get_dpp(), soid);
map<hobject_t,pg_missing_item>::const_iterator r = pm.get_items().find(
soid);
started++;
eversion_t need,
bool with_throttle)
{
- logger().info("{} {} v {}", __func__, soid, need);
+ LOG_PREFIX(PGRecovery::recover_missing);
+ DEBUGDPP("{} v {}", pg->get_dpp(), soid, need);
auto [recovering, added] = pg->get_recovery_backend()->add_recovering(soid);
if (added) {
- logger().info("{} {} v {}, new recovery", __func__, soid, need);
+ DEBUGDPP("{} v {}, new recovery", pg->get_dpp(), soid, need);
if (pg->get_peering_state().get_missing_loc().is_deleted(soid)) {
return recovering.wait_track_blocking(
trigger,
const hobject_t& soid,
eversion_t need)
{
- logger().info("{} {} v {}", __func__, soid, need);
+ LOG_PREFIX(PGRecovery::prep_object_replica_deletes);
+ DEBUGDPP("soid {} need v {}", pg->get_dpp(), soid, need);
auto [recovering, added] = pg->get_recovery_backend()->add_recovering(soid);
if (added) {
- logger().info("{} {} v {}, new recovery", __func__, soid, need);
+ DEBUGDPP("soid {} v {}, new recovery", pg->get_dpp(), soid, need);
return recovering.wait_track_blocking(
trigger,
pg->get_recovery_backend()->push_delete(soid, need).then_interruptible(
const hobject_t& soid,
eversion_t need)
{
- logger().info("{} {} v {}", __func__, soid, need);
+ LOG_PREFIX(PGRecovery::prep_object_replica_pushes);
+ DEBUGDPP("soid {} v {}", pg->get_dpp(), soid, need);
auto [recovering, added] = pg->get_recovery_backend()->add_recovering(soid);
if (added) {
- logger().info("{} {} v {}, new recovery", __func__, soid, need);
+ DEBUGDPP("soid {} v {}, new recovery", pg->get_dpp(), soid, need);
return recovering.wait_track_blocking(
trigger,
recover_object_with_throttle(soid, need)
const bool is_delete,
ceph::os::Transaction& t)
{
+ LOG_PREFIX(PGRecovery::on_local_recover);
+ DEBUGDPP("{}", pg->get_dpp(), soid);
if (const auto &log = pg->get_peering_state().get_pg_log();
!is_delete &&
log.get_missing().is_missing(recovery_info.soid) &&
assert(pg->is_primary());
if (const auto* latest = log.get_log().objects.find(recovery_info.soid)->second;
latest->op == pg_log_entry_t::LOST_REVERT) {
- ceph_abort("mark_unfound_lost (LOST_REVERT) is not implemented yet");
+ ceph_abort_msg("mark_unfound_lost (LOST_REVERT) is not implemented yet");
}
}
const object_stat_sum_t& stat_diff,
const bool is_delete)
{
- logger().info("{} {}", __func__, soid);
+ LOG_PREFIX(PGRecovery::on_global_recover);
+ DEBUGDPP("{}", pg->get_dpp(), soid);
pg->get_peering_state().object_recovered(soid, stat_diff);
pg->publish_stats_to_osd();
auto& recovery_waiter = pg->get_recovery_backend()->get_recovering(soid);
const hobject_t &oid,
const ObjectRecoveryInfo &recovery_info)
{
- crimson::get_logger(ceph_subsys_osd).debug(
- "{}: {}, {} on {}", __func__, oid,
- recovery_info.version, peer);
+ LOG_PREFIX(PGRecovery::on_peer_recover);
+ DEBUGDPP("{}, {} on {}", pg->get_dpp(), oid, recovery_info.version, peer);
pg->get_peering_state().on_peer_recover(peer, oid, recovery_info.version);
}
void PGRecovery::_committed_pushed_object(epoch_t epoch,
                                          eversion_t last_complete)
{
  LOG_PREFIX(PGRecovery::_committed_pushed_object);
  // Only advance last_complete_ondisk if the PG has not gone through an
  // interval change since this push was submitted; a reset invalidates
  // the epoch this completion was issued under.
  if (!pg->has_reset_since(epoch)) {
    pg->get_peering_state().recovery_committed_to(last_complete);
  } else {
    DEBUGDPP("pg has changed, not touching last_complete_ondisk", pg->get_dpp());
  }
}
const hobject_t& begin,
const hobject_t& end)
{
- logger().debug("{}: target.osd={}", __func__, target.osd);
+ LOG_PREFIX(PGRecovery::request_replica_scan);
+ DEBUGDPP("target.osd={}", pg->get_dpp(), target.osd);
auto msg = crimson::make_message<MOSDPGScan>(
MOSDPGScan::OP_SCAN_GET_DIGEST,
pg->get_pg_whoami(),
void PGRecovery::request_primary_scan(
const hobject_t& begin)
{
- logger().debug("{}", __func__);
+ LOG_PREFIX(PGRecovery::request_primary_scan);
+ DEBUGDPP("begin {}", pg->get_dpp(), begin);
using crimson::common::local_conf;
std::ignore = pg->get_recovery_backend()->scan_for_backfill_primary(
begin,
local_conf()->osd_backfill_scan_max,
pg->get_peering_state().get_backfill_targets()
).then_interruptible([this] (PrimaryBackfillInterval bi) {
- logger().debug("request_primary_scan:{}", __func__);
- using BackfillState = crimson::osd::BackfillState;
+ using BackfillState = crimson::osd::BackfillState;
backfill_state->process_event(
BackfillState::PrimaryScanned{ std::move(bi) }.intrusive_from_this());
});
const eversion_t& v,
const std::vector<pg_shard_t> &peers)
{
- logger().info("{}: obj={} v={} peers={}", __func__, obj, v, peers);
+ LOG_PREFIX(PGRecovery::enqueue_push);
+ DEBUGDPP("obj={} v={} peers={}", pg->get_dpp(), obj, v, peers);
auto &peering_state = pg->get_peering_state();
auto [recovering, added] = pg->get_recovery_backend()->add_recovering(obj);
if (!added)
handle_exception_interruptible([] (auto) {
ceph_abort_msg("got exception on backfill's push");
return seastar::make_ready_future<>();
- }).then_interruptible([this, obj] {
- logger().debug("enqueue_push:{}", __func__);
+ }).then_interruptible([this, FNAME, obj] {
+
+ DEBUGDPP("enqueue_push", pg->get_dpp());
using BackfillState = crimson::osd::BackfillState;
if (backfill_state->is_triggered()) {
backfill_state->post_event(
const hobject_t& obj,
const eversion_t& v)
{
+ LOG_PREFIX(PGRecovery::update_peers_last_backfill);
+ DEBUGDPP("obj={} v={} target={}", pg->get_dpp(), obj, v, target);
// allocate a pair if target is seen for the first time
auto& req = backfill_drop_requests[target];
if (!req) {
void PGRecovery::update_peers_last_backfill(
const hobject_t& new_last_backfill)
{
- logger().debug("{}: new_last_backfill={}",
- __func__, new_last_backfill);
+ LOG_PREFIX(PGRecovery::update_peers_last_backfill);
+ DEBUGDPP("new_last_backfill={}", pg->get_dpp(), new_last_backfill);
// If new_last_backfill == MAX, then we will send OP_BACKFILL_FINISH to
// all the backfill targets. Otherwise, we will move last_backfill up on
// those targets need it and send OP_BACKFILL_PROGRESS to them.
m->stats = pinfo.stats;
std::ignore = pg->get_shard_services().send_to_osd(
bt.osd, std::move(m), pg->get_osdmap_epoch());
- logger().info("{}: peer {} num_objects now {} / {}",
- __func__,
- bt,
- pinfo.stats.stats.sum.num_objects,
- pg->get_info().stats.stats.sum.num_objects);
+ DEBUGDPP("peer {} num_objects now {} / {}",
+ pg->get_dpp(),
+ bt,
+ pinfo.stats.stats.sum.num_objects,
+ pg->get_info().stats.stats.sum.num_objects);
}
}
}
// Notify peering that backfill has completed by dispatching the
// Backfilled event through the operation listener.
void PGRecovery::backfilled()
{
  LOG_PREFIX(PGRecovery::backfilled);
  DEBUGDPP("", pg->get_dpp());
  start_peering_event_operation_listener(PeeringState::Backfilled());
}
// Ask peering to (re)enter backfill by dispatching the RequestBackfill
// event through the operation listener.
void PGRecovery::request_backfill()
{
  LOG_PREFIX(PGRecovery::request_backfill);
  DEBUGDPP("", pg->get_dpp());
  start_peering_event_operation_listener(PeeringState::RequestBackfill());
}
// Notify peering that log-based recovery has finished on every replica
// by dispatching the AllReplicasRecovered event through the operation
// listener.
void PGRecovery::all_replicas_recovered()
{
  LOG_PREFIX(PGRecovery::all_replicas_recovered);
  DEBUGDPP("", pg->get_dpp());
  start_peering_event_operation_listener(PeeringState::AllReplicasRecovered());
}
void PGRecovery::backfill_suspended()
{
+ LOG_PREFIX(PGRecovery::backfill_suspended);
+ DEBUGDPP("", pg->get_dpp());
using BackfillState = crimson::osd::BackfillState;
backfill_state->process_event(
BackfillState::SuspendBackfill{}.intrusive_from_this());
void PGRecovery::dispatch_backfill_event(
boost::intrusive_ptr<const boost::statechart::event_base> evt)
{
- logger().debug("{}", __func__);
+ LOG_PREFIX(PGRecovery::dispatch_backfill_event);
+ DEBUGDPP("", pg->get_dpp());
assert(backfill_state);
backfill_state->process_event(evt);
// TODO: Do we need to worry about cases in which the pg has
// Called once activation finishes: any in-flight backfill state machine
// belongs to the previous interval, so drop it. A fresh one is created
// lazily when backfill is next reserved.
void PGRecovery::on_activate_complete()
{
  LOG_PREFIX(PGRecovery::on_activate_complete);
  DEBUGDPP("backfill_state={}", pg->get_dpp(), fmt::ptr(backfill_state.get()));
  backfill_state.reset();
}
void PGRecovery::on_backfill_reserved()
{
- logger().debug("{}", __func__);
+ LOG_PREFIX(PGRecovery::on_backfill_reserved);
+ DEBUGDPP("", pg->get_dpp());
ceph_assert(pg->get_peering_state().is_backfilling());
// let's be lazy with creating the backfill stuff
using BackfillState = crimson::osd::BackfillState;