From: Aishwarya Mathuria Date: Thu, 13 Nov 2025 16:56:03 +0000 (+0000) Subject: crimson/scrub: Add debug logging X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=d8cbca442aa937de77a248452d3c28e938c0e8d3;p=ceph-ci.git crimson/scrub: Add debug logging Signed-off-by: Aishwarya Mathuria --- diff --git a/src/crimson/mon/MonClient.cc b/src/crimson/mon/MonClient.cc index fb60645f868..77830b55b58 100644 --- a/src/crimson/mon/MonClient.cc +++ b/src/crimson/mon/MonClient.cc @@ -1139,6 +1139,7 @@ bool Client::sub_want_increment(const std::string& what, version_t start, unsigned flags) { + logger().debug("{} for {} epoch {}", __func__, what, start); return sub.inc_want(what, start, flags); } @@ -1149,6 +1150,7 @@ seastar::future<> Client::renew_subs() return seastar::now(); } logger().trace("{}", __func__); + logger().debug("{}", __func__); auto m = crimson::make_message(); m->what = sub.get_subs(); diff --git a/src/crimson/osd/heartbeat.cc b/src/crimson/osd/heartbeat.cc index 184301d091d..468f86512a1 100644 --- a/src/crimson/osd/heartbeat.cc +++ b/src/crimson/osd/heartbeat.cc @@ -311,12 +311,14 @@ seastar::future<> Heartbeat::maybe_share_osdmap( { const osd_id_t from = m->get_source().num(); const epoch_t current_osdmap_epoch = service.get_map()->get_epoch(); + logger().debug("{} current_osdmap_epoch {}", __func__, current_osdmap_epoch); auto found = peers.find(from); if (found == peers.end()) { return seastar::now(); } auto& peer = found->second; - + logger().debug(" peer {}, peer projected epoch: {} m->map_epoch: {}", peer, peer.get_projected_epoch(), m->map_epoch); if (m->map_epoch > peer.get_projected_epoch()) { logger().debug("{} updating peer {} session's projected_epoch" "from {} to ping map epoch of {}", diff --git a/src/crimson/osd/osd.cc b/src/crimson/osd/osd.cc index b914b3fc42b..13f8479bc05 100644 --- a/src/crimson/osd/osd.cc +++ b/src/crimson/osd/osd.cc @@ -1184,6 +1184,7 @@ seastar::future<> OSD::_handle_osd_map(Ref m) } // 
missing some? epoch_t start = superblock.get_newest_map() + 1; + DEBUG(" start epoch: {}", start); if (first > start) { INFO("message skips epochs {}..{}", start, first - 1); @@ -1289,18 +1290,20 @@ seastar::future<> OSD::committed_osd_maps( INFO("osd.{}: committed_osd_maps: broadcasting osdmaps up" " to {} epoch to pgs", whoami, osdmap->get_epoch()); co_await pg_shard_manager.broadcast_map_to_pgs(osdmap->get_epoch()); + INFO("broadcasting up to {} epoch is done", osdmap->get_epoch()); } else { if (pg_shard_manager.is_prestop()) { got_stop_ack(); } } - + DEBUG("!pg_shard_manager.is_stopping() = {}", !pg_shard_manager.is_stopping()); if (!pg_shard_manager.is_stopping()) { /* * TODO: Missing start_waiting_for_healthy() counterpart. * Only subscribe to the next map until implemented. * See https://tracker.ceph.com/issues/66832 */ + DEBUG(" osdmap subscribe to {}", osdmap->get_epoch() + 1); co_await get_shard_services().osdmap_subscribe(osdmap->get_epoch() + 1, false); } if (pg_shard_manager.is_active()) { diff --git a/src/crimson/osd/scrub/scrub_machine.cc b/src/crimson/osd/scrub/scrub_machine.cc index 44b61fc9d9c..b1b999a53fd 100644 --- a/src/crimson/osd/scrub/scrub_machine.cc +++ b/src/crimson/osd/scrub/scrub_machine.cc @@ -3,8 +3,11 @@ #include "include/ceph_assert.h" +#include "crimson/common/log.h" #include "crimson/osd/scrub/scrub_machine.h" +SET_SUBSYS(osd); + namespace crimson::osd::scrub { WaitUpdate::WaitUpdate(my_context ctx) : ScrubState(ctx) @@ -32,31 +35,42 @@ ScanRange::ScanRange(my_context ctx) : ScrubState(ctx) sc::result ScanRange::react(const ScrubContext::scan_range_complete_t &event) { + LOG_PREFIX(ScanRange::react); + DEBUGDPP(" Got ScrubContext::scan_range_complete_t", get_scrub_context().get_dpp()); auto [_, inserted] = maps.insert(event.value.to_pair()); ceph_assert(inserted); ceph_assert(waiting_on > 0); + DEBUGDPP(" Waiting on: {}, before decrement! 
", get_scrub_context().get_dpp(), waiting_on); --waiting_on; + DEBUGDPP(" Waiting on: {}, after decrement! ", get_scrub_context().get_dpp(), waiting_on); if (waiting_on > 0) { + DEBUGDPP(" Waiting on: {}, discarding event! ", get_scrub_context().get_dpp(), waiting_on); return discard_event(); } else { + DEBUGDPP(" ceph_assert(context().range)", get_scrub_context().get_dpp()); ceph_assert(context().range); { + DEBUGDPP(" Calling validate_chunk", get_scrub_context().get_dpp()); auto results = validate_chunk( get_scrub_context().get_dpp(), context().policy, maps); + DEBUGDPP(" From validate_chunk Scrub errors: {} ", get_scrub_context().get_dpp(), results.stats.num_scrub_errors); context().stats.add(results.stats); + DEBUGDPP(" Calling emit_chunk_result", get_scrub_context().get_dpp()); get_scrub_context().emit_chunk_result( *(context().range), std::move(results)); } if (context().range->end.is_max()) { + DEBUGDPP(" Calling emit_scrub_result", get_scrub_context().get_dpp()); get_scrub_context().emit_scrub_result( context().deep, context().stats); return transit(); } else { + DEBUGDPP(" Before advance_current to ChunkState", get_scrub_context().get_dpp()); context().advance_current( context().range->end); return transit(); diff --git a/src/crimson/osd/shard_services.cc b/src/crimson/osd/shard_services.cc index 09da0412416..5219c276a93 100644 --- a/src/crimson/osd/shard_services.cc +++ b/src/crimson/osd/shard_services.cc @@ -188,6 +188,7 @@ seastar::future<> OSDSingletonState::osdmap_subscribe( { LOG_PREFIX(OSDSingletonState::osdmap_subscribe); INFO("epoch {}", epoch); + INFO("force request? 
{}", force_request); if (monc.sub_want_increment("osdmap", epoch, CEPH_SUBSCRIBE_ONETIME) || force_request) { return monc.renew_subs(); @@ -377,7 +378,10 @@ void OSDSingletonState::store_map_bl( ceph::os::Transaction& t, epoch_t e, bufferlist&& bl) { + LOG_PREFIX(OSDSingletonState::store_map_bl); + DEBUG(" storing osdmap.{}", e); meta_coll->store_map(t, e, bl); + DEBUG(" stored {} in meta_coll", e); map_bl_cache.insert(e, std::move(bl)); } @@ -385,7 +389,10 @@ void OSDSingletonState::store_inc_map_bl( ceph::os::Transaction& t, epoch_t e, bufferlist&& bl) { + LOG_PREFIX(OSDSingletonState::store_inc_map_bl); + DEBUG(" storing osdmap.{}", e); meta_coll->store_inc_map(t, e, bl); + DEBUG(" stored {} in meta_coll", e); inc_map_bl_cache.insert(e, std::move(bl)); } @@ -843,6 +850,7 @@ seastar::future> OSDSingletonState::build_incremental_map_msg( epoch_t last) { LOG_PREFIX(OSDSingletonState::build_incremental_map_msg); + DEBUG(" first {} last {}", first, last); return seastar::do_with(crimson::common::local_conf()->osd_map_message_max, crimson::make_message( monc.get_fsid(), @@ -858,6 +866,7 @@ seastar::future> OSDSingletonState::build_incremental_map_msg( // we don't have the next map the target wants, // so start with a full map. first = superblock.cluster_osdmap_trim_lower_bound; + DEBUG("going to load_map_bl got {}", first); maybe_handle_mapgap = load_map_bl(first).then( [&first, &map_message_max, &m](auto&& bl) { m->maps[first] = std::move(bl); ++first; }); } - return maybe_handle_mapgap.then([this, first, last, &map_message_max, &m] { + return maybe_handle_mapgap.then([this, FNAME, first, last, &map_message_max, &m] { if (first > last) { // first may be later than last in the case of map gap + DEBUG(" there is a mapgap! 
{} > {} ", first, last); ceph_assert(!m->maps.empty()); return seastar::make_ready_future>(std::move(m)); } + DEBUG(" going to load_map_bls"); return load_map_bls( first, ((last - first) > map_message_max) ? (first + map_message_max) : last @@ -917,13 +928,14 @@ seastar::future<> OSDSingletonState::send_incremental_map_to_osd( int osd, epoch_t first) { - LOG_PREFIX(OSDSingletonState::send_incremental_map); + LOG_PREFIX(OSDSingletonState::send_incremental_map_to_osd); if (osdmap->is_down(osd)) { INFO("osd.{} is_down", osd); return seastar::now(); } else { auto conn = cluster_msgr.connect( osdmap->get_cluster_addrs(osd).front(), CEPH_ENTITY_TYPE_OSD); + DEBUG(" sending {} to osd.{}", first, osd); return send_incremental_map(*conn, first); } }