crimson/scrub: Add debug logging
author    Aishwarya Mathuria <amathuri@redhat.com>
          Thu, 13 Nov 2025 16:56:03 +0000 (16:56 +0000)
committer Aishwarya Mathuria <amathuri@redhat.com>
          Wed, 7 Jan 2026 04:59:36 +0000 (10:29 +0530)
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
src/crimson/mon/MonClient.cc
src/crimson/osd/heartbeat.cc
src/crimson/osd/osd.cc
src/crimson/osd/scrub/scrub_machine.cc
src/crimson/osd/shard_services.cc

index fb60645f8688ef10e9966ae0666518d132f5c40d..77830b55b580f91fcb164cc70af9859183161efb 100644 (file)
@@ -1139,6 +1139,7 @@ bool Client::sub_want_increment(const std::string& what,
                                 version_t start,
                                 unsigned flags)
 {
+  logger().debug("{} for {} epoch {}", __func__, what, start);
   return sub.inc_want(what, start, flags);
 }
 
@@ -1149,6 +1150,7 @@ seastar::future<> Client::renew_subs()
     return seastar::now();
   }
   logger().trace("{}", __func__);
+  logger().debug("{}", __func__);
 
   auto m = crimson::make_message<MMonSubscribe>();
   m->what = sub.get_subs();
index 184301d091da1938fcebd4854d66faf01d7f1965..468f86512a1eec36505ee7c14ccc3e4aa6d7a5b8 100644 (file)
@@ -311,12 +311,14 @@ seastar::future<> Heartbeat::maybe_share_osdmap(
 {
   const osd_id_t from = m->get_source().num();
   const epoch_t current_osdmap_epoch = service.get_map()->get_epoch();
+  logger().debug("{} current_osdmap_epoch", __func__, current_osdmap_epoch);
   auto found = peers.find(from);
   if (found == peers.end()) {
     return seastar::now();
   }
   auto& peer = found->second;
-
+  logger().debug(" peer {}, peer projected epoch: {} m->map_epoch: {}",
+    peer, peer.get_projected_epoch(), m->map_epoch);
   if (m->map_epoch > peer.get_projected_epoch()) {
     logger().debug("{} updating peer {} session's projected_epoch"
                    "from {} to ping map epoch of {}",
index b914b3fc42bcb8cdf0bb6def3be32d08f6382499..13f8479bc05ceb0390804477bd3599c72a3347ac 100644 (file)
@@ -1184,6 +1184,7 @@ seastar::future<> OSD::_handle_osd_map(Ref<MOSDMap> m)
   }
   // missing some?
   epoch_t start = superblock.get_newest_map() + 1;
+  DEBUG(" start epoch: {}", start);
   if (first > start) {
     INFO("message skips epochs {}..{}",
         start, first - 1);
@@ -1289,18 +1290,20 @@ seastar::future<> OSD::committed_osd_maps(
     INFO("osd.{}: committed_osd_maps: broadcasting osdmaps up"
          " to {} epoch to pgs", whoami, osdmap->get_epoch());
     co_await pg_shard_manager.broadcast_map_to_pgs(osdmap->get_epoch());
+    INFO("broadcasting up to {} epoch is done", osdmap->get_epoch);
   } else {
     if (pg_shard_manager.is_prestop()) {
       got_stop_ack();
     }
   }
-
+  DEBUG("!pg_shard_manager.is_stopping() = {}", !pg_shard_manager.is_stopping());
   if (!pg_shard_manager.is_stopping()) {
     /*
      * TODO: Missing start_waiting_for_healthy() counterpart.
      * Only subscribe to the next map until implemented.
      * See https://tracker.ceph.com/issues/66832
     */
+    DEBUG(" osdmap subscribe to {}", osdmap->get_epoch() + 1);
     co_await get_shard_services().osdmap_subscribe(osdmap->get_epoch() + 1, false);
   }
   if (pg_shard_manager.is_active()) {
index 44b61fc9d9cd617a744de4641ab58729509b6976..b1b999a53fd06a3f44cb45d72b6d961f27da90df 100644 (file)
@@ -3,8 +3,11 @@
 
 #include "include/ceph_assert.h"
 
+#include "crimson/common/log.h"
 #include "crimson/osd/scrub/scrub_machine.h"
 
+SET_SUBSYS(osd);
+
 namespace crimson::osd::scrub {
 
 WaitUpdate::WaitUpdate(my_context ctx) : ScrubState(ctx)
@@ -32,31 +35,42 @@ ScanRange::ScanRange(my_context ctx) : ScrubState(ctx)
 
 sc::result ScanRange::react(const ScrubContext::scan_range_complete_t &event)
 {
+  LOG_PREFIX(ScanRange::react);
+  DEBUGDPP(" Got ScrubContext::scan_range_complete_t", get_scrub_context().get_dpp());
   auto [_, inserted] = maps.insert(event.value.to_pair());
   ceph_assert(inserted);
   ceph_assert(waiting_on > 0);
+  DEBUGDPP(" Waiting on: {}, before decrement! ", get_scrub_context().get_dpp(), waiting_on);
   --waiting_on;
+  DEBUGDPP(" Waiting on: {}, after decrement! ", get_scrub_context().get_dpp(), waiting_on);
 
   if (waiting_on > 0) {
+    DEBUGDPP(" Waiting on: {}, discarding event! ", get_scrub_context().get_dpp(), waiting_on);
     return discard_event();
   } else {
+    DEBUGDPP(" ceph_assert(context<ChunkState>().range)", get_scrub_context().get_dpp());
     ceph_assert(context<ChunkState>().range);
     {
+      DEBUGDPP(" Calling validate_chunk", get_scrub_context().get_dpp());
       auto results = validate_chunk(
        get_scrub_context().get_dpp(),
        context<Scrubbing>().policy,
        maps);
+      DEBUGDPP(" From validate_chunk Scrub errors: {} ", get_scrub_context().get_dpp(), results.stats.num_scrub_errors);
       context<Scrubbing>().stats.add(results.stats);
+      DEBUGDPP(" Calling emit_chunk_result", get_scrub_context().get_dpp());
       get_scrub_context().emit_chunk_result(
        *(context<ChunkState>().range),
        std::move(results));
     }
     if (context<ChunkState>().range->end.is_max()) {
+      DEBUGDPP(" Calling emit_scrub_result", get_scrub_context().get_dpp());
       get_scrub_context().emit_scrub_result(
        context<Scrubbing>().deep,
        context<Scrubbing>().stats);
       return transit<PrimaryActive>();
     } else {
+      DEBUGDPP(" Before advance_current to ChunkState", get_scrub_context().get_dpp());
       context<Scrubbing>().advance_current(
        context<ChunkState>().range->end);
       return transit<ChunkState>();
index 09da0412416335767ef79faa0e563ed975fe2455..5219c276a93d7a4509a4b698acdb506f645580da 100644 (file)
@@ -188,6 +188,7 @@ seastar::future<> OSDSingletonState::osdmap_subscribe(
 {
   LOG_PREFIX(OSDSingletonState::osdmap_subscribe);
   INFO("epoch {}", epoch);
+  INFO("force request? {}", force_request);
   if (monc.sub_want_increment("osdmap", epoch, CEPH_SUBSCRIBE_ONETIME) ||
       force_request) {
     return monc.renew_subs();
@@ -377,7 +378,10 @@ void OSDSingletonState::store_map_bl(
   ceph::os::Transaction& t,
   epoch_t e, bufferlist&& bl)
 {
+  LOG_PREFIX(OSDSingletonState::store_map_bl);
+  DEBUG(" storing osdmap.{}", e);
   meta_coll->store_map(t, e, bl);
+  DEBUG(" stored {} meta_coll", e);
   map_bl_cache.insert(e, std::move(bl));
 }
 
@@ -385,7 +389,10 @@ void OSDSingletonState::store_inc_map_bl(
   ceph::os::Transaction& t,
   epoch_t e, bufferlist&& bl)
 {
+  LOG_PREFIX(OSDSingletonState::store_inc_map_bl);
+  DEBUG(" storing osdmap.{}", e);
   meta_coll->store_inc_map(t, e, bl);
+  DEBUG(" stored {} in meta_coll", e);
   inc_map_bl_cache.insert(e, std::move(bl));
 }
 
@@ -843,6 +850,7 @@ seastar::future<MURef<MOSDMap>> OSDSingletonState::build_incremental_map_msg(
   epoch_t last)
 {
   LOG_PREFIX(OSDSingletonState::build_incremental_map_msg);
+  DEBUG(" first {} last {}", first, last);
   return seastar::do_with(crimson::common::local_conf()->osd_map_message_max,
                           crimson::make_message<MOSDMap>(
                             monc.get_fsid(),
@@ -858,6 +866,7 @@ seastar::future<MURef<MOSDMap>> OSDSingletonState::build_incremental_map_msg(
       // we don't have the next map the target wants,
       // so start with a full map.
       first = superblock.cluster_osdmap_trim_lower_bound;
+      DEBUG("going to load_map_bl got {}", first);
       maybe_handle_mapgap = load_map_bl(first).then(
       [&first, &map_message_max, &m](auto&& bl) {
         m->maps[first] = std::move(bl);
@@ -865,12 +874,14 @@ seastar::future<MURef<MOSDMap>> OSDSingletonState::build_incremental_map_msg(
         ++first;
       });
     }
-    return maybe_handle_mapgap.then([this, first, last, &map_message_max, &m] {
+    return maybe_handle_mapgap.then([this, FNAME, first, last, &map_message_max, &m] {
       if (first > last) {
         // first may be later than last in the case of map gap
+        DEBUG(" there is a mapgap! {} > {} ", first, last);
         ceph_assert(!m->maps.empty());
         return seastar::make_ready_future<MURef<MOSDMap>>(std::move(m));
       }
+      DEBUG(" going to load_map_bls");
       return load_map_bls(
         first,
         ((last - first) > map_message_max) ? (first + map_message_max) : last
@@ -917,13 +928,14 @@ seastar::future<> OSDSingletonState::send_incremental_map_to_osd(
   int osd,
   epoch_t first)
 {
-  LOG_PREFIX(OSDSingletonState::send_incremental_map);
+  LOG_PREFIX(OSDSingletonState::send_incremental_map_to_osd);
   if (osdmap->is_down(osd)) {
     INFO("osd.{} is_down", osd);
     return seastar::now();
   } else {
     auto conn = cluster_msgr.connect(
       osdmap->get_cluster_addrs(osd).front(), CEPH_ENTITY_TYPE_OSD);
+    DEBUG(" sending {} to osd.{}", first, osd);
     return send_incremental_map(*conn, first);
   }
 }
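
For reference, the added lines all follow the crimson logging pattern already visible in this diff: SET_SUBSYS picks the logging subsystem for the translation unit, LOG_PREFIX defines a local FNAME that the DEBUG/INFO macros reference (which is why the lambda in build_incremental_map_msg now captures FNAME), and DEBUGDPP additionally takes a debug-prefix provider as its second argument. Below is a minimal sketch of that usage, assuming crimson/common/log.h behaves as implied by the hunks above; the function, template and parameter names are illustrative only and not part of the commit.

// Sketch, not part of the commit: mirrors the macro usage in the hunks above.
#include "include/types.h"        // epoch_t
#include "crimson/common/log.h"   // SET_SUBSYS, LOG_PREFIX, DEBUG, INFO, DEBUGDPP

SET_SUBSYS(osd);  // every macro in this file logs under the "osd" subsystem

template <typename DppT>
void example_step(epoch_t first, epoch_t last, DppT &dpp)
{
  LOG_PREFIX(Example::example_step);       // defines the local FNAME used below
  DEBUG("first {} last {}", first, last);  // fmt-style args, message prefixed with FNAME
  INFO("epoch {}", first);
  DEBUGDPP("scanning from {}", dpp, first); // dpp-aware variant: prefix provider is arg #2
  // FNAME is an ordinary local, so any continuation that logs must capture it:
  auto log_later = [FNAME, first] { DEBUG("in continuation, first {}", first); };
  log_later();
}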