version_t start,
unsigned flags)
{
+ logger().debug("{} for {} epoch {}", __func__, what, start);
return sub.inc_want(what, start, flags);
}
return seastar::now();
}
logger().trace("{}", __func__);
+ logger().debug("{}", __func__);
auto m = crimson::make_message<MMonSubscribe>();
m->what = sub.get_subs();
{
const osd_id_t from = m->get_source().num();
const epoch_t current_osdmap_epoch = service.get_map()->get_epoch();
+    logger().debug("{} current_osdmap_epoch {}", __func__, current_osdmap_epoch);
auto found = peers.find(from);
if (found == peers.end()) {
return seastar::now();
}
auto& peer = found->second;
-
+ logger().debug(" peer {}, peer projected epoch: {} m->map_epoch: {}",
+ peer, peer.get_projected_epoch(), m->map_epoch);
if (m->map_epoch > peer.get_projected_epoch()) {
logger().debug("{} updating peer {} session's projected_epoch"
"from {} to ping map epoch of {}",
}
// missing some?
epoch_t start = superblock.get_newest_map() + 1;
+ DEBUG(" start epoch: {}", start);
if (first > start) {
INFO("message skips epochs {}..{}",
start, first - 1);
INFO("osd.{}: committed_osd_maps: broadcasting osdmaps up"
" to {} epoch to pgs", whoami, osdmap->get_epoch());
co_await pg_shard_manager.broadcast_map_to_pgs(osdmap->get_epoch());
+    INFO("broadcasting up to {} epoch is done", osdmap->get_epoch());
} else {
if (pg_shard_manager.is_prestop()) {
got_stop_ack();
}
}
-
+ DEBUG("!pg_shard_manager.is_stopping() = {}", !pg_shard_manager.is_stopping());
if (!pg_shard_manager.is_stopping()) {
/*
* TODO: Missing start_waiting_for_healthy() counterpart.
* Only subscribe to the next map until implemented.
* See https://tracker.ceph.com/issues/66832
*/
+ DEBUG(" osdmap subscribe to {}", osdmap->get_epoch() + 1);
co_await get_shard_services().osdmap_subscribe(osdmap->get_epoch() + 1, false);
}
if (pg_shard_manager.is_active()) {
#include "include/ceph_assert.h"
+#include "crimson/common/log.h"
#include "crimson/osd/scrub/scrub_machine.h"
+SET_SUBSYS(osd);
+
namespace crimson::osd::scrub {
WaitUpdate::WaitUpdate(my_context ctx) : ScrubState(ctx)
sc::result ScanRange::react(const ScrubContext::scan_range_complete_t &event)
{
+ LOG_PREFIX(ScanRange::react);
+ DEBUGDPP(" Got ScrubContext::scan_range_complete_t", get_scrub_context().get_dpp());
auto [_, inserted] = maps.insert(event.value.to_pair());
ceph_assert(inserted);
ceph_assert(waiting_on > 0);
+ DEBUGDPP(" Waiting on: {}, before decrement! ", get_scrub_context().get_dpp(), waiting_on);
--waiting_on;
+ DEBUGDPP(" Waiting on: {}, after decrement! ", get_scrub_context().get_dpp(), waiting_on);
if (waiting_on > 0) {
+ DEBUGDPP(" Waiting on: {}, discarding event! ", get_scrub_context().get_dpp(), waiting_on);
return discard_event();
} else {
+ DEBUGDPP(" ceph_assert(context<ChunkState>().range)", get_scrub_context().get_dpp());
ceph_assert(context<ChunkState>().range);
{
+ DEBUGDPP(" Calling validate_chunk", get_scrub_context().get_dpp());
auto results = validate_chunk(
get_scrub_context().get_dpp(),
context<Scrubbing>().policy,
maps);
+ DEBUGDPP(" From validate_chunk Scrub errors: {} ", get_scrub_context().get_dpp(), results.stats.num_scrub_errors);
context<Scrubbing>().stats.add(results.stats);
+ DEBUGDPP(" Calling emit_chunk_result", get_scrub_context().get_dpp());
get_scrub_context().emit_chunk_result(
*(context<ChunkState>().range),
std::move(results));
}
if (context<ChunkState>().range->end.is_max()) {
+ DEBUGDPP(" Calling emit_scrub_result", get_scrub_context().get_dpp());
get_scrub_context().emit_scrub_result(
context<Scrubbing>().deep,
context<Scrubbing>().stats);
return transit<PrimaryActive>();
} else {
+ DEBUGDPP(" Before advance_current to ChunkState", get_scrub_context().get_dpp());
context<Scrubbing>().advance_current(
context<ChunkState>().range->end);
return transit<ChunkState>();
{
LOG_PREFIX(OSDSingletonState::osdmap_subscribe);
INFO("epoch {}", epoch);
+ INFO("force request? {}", force_request);
if (monc.sub_want_increment("osdmap", epoch, CEPH_SUBSCRIBE_ONETIME) ||
force_request) {
return monc.renew_subs();
ceph::os::Transaction& t,
epoch_t e, bufferlist&& bl)
{
+ LOG_PREFIX(OSDSingletonState::store_map_bl);
+ DEBUG(" storing osdmap.{}", e);
meta_coll->store_map(t, e, bl);
+ DEBUG(" stored {} meta_coll", e);
map_bl_cache.insert(e, std::move(bl));
}
ceph::os::Transaction& t,
epoch_t e, bufferlist&& bl)
{
+ LOG_PREFIX(OSDSingletonState::store_inc_map_bl);
+  DEBUG(" storing inc osdmap.{}", e);
meta_coll->store_inc_map(t, e, bl);
+ DEBUG(" stored {} in meta_coll", e);
inc_map_bl_cache.insert(e, std::move(bl));
}
epoch_t last)
{
LOG_PREFIX(OSDSingletonState::build_incremental_map_msg);
+ DEBUG(" first {} last {}", first, last);
return seastar::do_with(crimson::common::local_conf()->osd_map_message_max,
crimson::make_message<MOSDMap>(
monc.get_fsid(),
// we don't have the next map the target wants,
// so start with a full map.
first = superblock.cluster_osdmap_trim_lower_bound;
+ DEBUG("going to load_map_bl got {}", first);
maybe_handle_mapgap = load_map_bl(first).then(
[&first, &map_message_max, &m](auto&& bl) {
m->maps[first] = std::move(bl);
++first;
});
}
- return maybe_handle_mapgap.then([this, first, last, &map_message_max, &m] {
+ return maybe_handle_mapgap.then([this, FNAME, first, last, &map_message_max, &m] {
if (first > last) {
// first may be later than last in the case of map gap
+ DEBUG(" there is a mapgap! {} > {} ", first, last);
ceph_assert(!m->maps.empty());
return seastar::make_ready_future<MURef<MOSDMap>>(std::move(m));
}
+ DEBUG(" going to load_map_bls");
return load_map_bls(
first,
((last - first) > map_message_max) ? (first + map_message_max) : last
int osd,
epoch_t first)
{
- LOG_PREFIX(OSDSingletonState::send_incremental_map);
+ LOG_PREFIX(OSDSingletonState::send_incremental_map_to_osd);
if (osdmap->is_down(osd)) {
INFO("osd.{} is_down", osd);
return seastar::now();
} else {
auto conn = cluster_msgr.connect(
osdmap->get_cluster_addrs(osd).front(), CEPH_ENTITY_TYPE_OSD);
+ DEBUG(" sending {} to osd.{}", first, osd);
return send_incremental_map(*conn, first);
}
}