#include "include/types.h"
#include "common/Formatter.h"
+#include "crimson/common/log.h"
#include "crimson/osd/pg.h"
#include "crimson/osd/osdmap_service.h"
#include "crimson/osd/shard_services.h"
#include <seastar/coroutine/parallel_for_each.hh>
#include "osd/PeeringState.h"
-namespace {
- seastar::logger& logger() {
- return crimson::get_logger(ceph_subsys_osd);
- }
-}
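+// SET_SUBSYS(osd) directs the DEBUG/INFO macros below to the osd logging
+// subsystem, replacing the file-local logger() helper removed above.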
+SET_SUBSYS(osd);
namespace crimson::osd {
: pg(pg), shard_services(shard_services), to(to),
rctx(std::move(rctx)), do_init(do_init)
{
- logger().debug("{}: created", *this);
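+  // LOG_PREFIX defines a local FNAME that the logging macros prepend to
+  // every message emitted in this scope.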
+ LOG_PREFIX(PGAdvanceMap);
+ DEBUG("{}: created", *this);
}
PGAdvanceMap::~PGAdvanceMap() {}
seastar::future<> PGAdvanceMap::start()
{
+ LOG_PREFIX(PGAdvanceMap::start);
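+  // FNAME is function-local, so every continuation lambda below has to
+  // capture it for the DEBUG/INFO macros to resolve.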
using cached_map_t = OSDMapService::cached_map_t;
- logger().debug("{}: start", *this);
+ DEBUG("{}: start", *this);
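+  // hold a self-reference so the operation stays alive until the trailing
+  // finally() block releases it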
IRef ref = this;
return enter_stage<>(
peering_pp(*pg).process
- ).then([this] {
+ ).then([this, FNAME] {
/*
* PGAdvanceMap is scheduled at pg creation and when
* broadcasting new osdmaps to pgs. We are not able to serialize
return seastar::do_for_each(
boost::make_counting_iterator(*from + 1),
boost::make_counting_iterator(to + 1),
- [this](epoch_t next_epoch) {
- logger().debug("{}: start: getting map {}",
+ [this, FNAME](epoch_t next_epoch) {
+ DEBUG("{}: start: getting map {}",
*this, next_epoch);
return shard_services.get_map(next_epoch).then(
- [this] (cached_map_t&& next_map) {
- logger().debug("{}: advancing map to {}",
- *this, next_map->get_epoch());
+ [this, FNAME] (cached_map_t&& next_map) {
+ DEBUG("{}: advancing map to {}",
+ *this, next_map->get_epoch());
pg->handle_advance_map(next_map, rctx);
return check_for_splits(*from, next_map);
});
- }).then([this] {
+ }).then([this, FNAME] {
pg->handle_activate_map(rctx);
- logger().debug("{}: map activated", *this);
+ DEBUG("{}: map activated", *this);
if (do_init) {
shard_services.pg_created(pg->get_pgid(), pg);
- logger().info("PGAdvanceMap::start new pg {}", *pg);
+      INFO("{}: new pg {}", *this, *pg);
}
return pg->complete_rctx(std::move(rctx));
});
- }).then([this] {
- logger().debug("{}: complete", *this);
+ }).then([this, FNAME] {
+ DEBUG("{}: complete", *this);
return handle.complete();
- }).finally([this, ref=std::move(ref)] {
- logger().debug("{}: exit", *this);
+ }).finally([this, FNAME, ref=std::move(ref)] {
+ DEBUG("{}: exit", *this);
handle.exit();
});
}
epoch_t old_epoch,
cached_map_t next_map)
{
+ LOG_PREFIX(PGAdvanceMap::check_for_splits);
using cached_map_t = OSDMapService::cached_map_t;
cached_map_t old_map = co_await shard_services.get_map(old_epoch);
if (!old_map->have_pg_pool(pg->get_pgid().pool())) {
- logger().debug("{} pool doesn't exist in epoch {}", pg->get_pgid(),
+ DEBUG("{} pool doesn't exist in epoch {}", pg->get_pgid(),
old_epoch);
co_return;
}
auto old_pg_num = old_map->get_pg_num(pg->get_pgid().pool());
if (!next_map->have_pg_pool(pg->get_pgid().pool())) {
- logger().debug("{} pool doesn't exist in epoch {}", pg->get_pgid(),
+ DEBUG("{} pool doesn't exist in epoch {}", pg->get_pgid(),
next_map->get_epoch());
co_return;
}
auto new_pg_num = next_map->get_pg_num(pg->get_pgid().pool());
- logger().debug(" pg_num change in e{} {} -> {}", next_map->get_epoch(),
+ DEBUG(" pg_num change in e{} {} -> {}", next_map->get_epoch(),
old_pg_num, new_pg_num);
std::set<spg_t> children;
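+  // a pg_num increase between the two maps means this PG has split into
+  // child PGs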
if (new_pg_num && new_pg_num > old_pg_num) {
std::set<spg_t> split_children,
cached_map_t next_map)
{
- logger().debug("{}: start", *this);
+ LOG_PREFIX(PGAdvanceMap::split_pg);
+ DEBUG("{}: start", *this);
auto pg_epoch = next_map->get_epoch();
- logger().debug("{}: epoch: {}", *this, pg_epoch);
+ DEBUG("{}: epoch: {}", *this, pg_epoch);
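+  // next_map is captured by reference; this is safe because the coroutine
+  // co_awaits the parallel_for_each before next_map can go out of scope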
co_await seastar::coroutine::parallel_for_each(split_children, [this, &next_map,
- pg_epoch] (auto child_pgid) -> seastar::future<> {
+ pg_epoch, FNAME] (auto child_pgid) -> seastar::future<> {
children_pgids.insert(child_pgid);
// Map each child pg ID to a core
auto core = co_await shard_services.create_split_pg_mapping(child_pgid, seastar::this_shard_id());
- logger().debug(" PG {} mapped to {}", child_pgid.pgid, core);
- logger().debug(" {} map epoch: {}", child_pgid.pgid, pg_epoch);
+ DEBUG(" PG {} mapped to {}", child_pgid.pgid, core);
+ DEBUG(" {} map epoch: {}", child_pgid.pgid, pg_epoch);
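+    // copy the cached map reference, as make_pg() takes ownership and this
+    // lambda runs once per child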
auto map = next_map;
auto child_pg = co_await shard_services.make_pg(std::move(map), child_pgid, true);
- logger().debug(" Parent PG: {}", pg->get_pgid());
- logger().debug(" Child PG ID: {}", child_pg->get_pgid());
+ DEBUG(" Parent pgid: {}", pg->get_pgid());
+ DEBUG(" Child pgid: {}", child_pg->get_pgid());
unsigned new_pg_num = next_map->get_pg_num(pg->get_pgid().pool());
// Depending on the new_pg_num, the parent PG's collection is split.
// The child PG will be initialized with this split collection.
unsigned split_bits = child_pg->get_pgid().get_split_bits(new_pg_num);
- logger().debug(" pg num is {}, m_seed is {}, split bits is {}",
- new_pg_num, child_pg->get_pgid().ps(), split_bits);
+ DEBUG(" pg num is {}, m_seed is {}, split bits is {}",
+ new_pg_num, child_pg->get_pgid().ps(), split_bits);
co_await pg->split_colls(child_pg->get_pgid(), split_bits, child_pg->get_pgid().ps(),
&child_pg->get_pgpool().info, rctx.transaction);
- logger().debug(" {} split collection done", child_pg->get_pgid());
+ DEBUG(" {} split collection done", child_pg->get_pgid());
// Update the child PG's info from the parent PG
pg->split_into(child_pg->get_pgid().pgid, child_pg, split_bits);
Ref<PG> child_pg,
cached_map_t next_map)
{
+ LOG_PREFIX(PGAdvanceMap::handle_split_pg_creation);
// We must create a new Trigger instance for each pg.
// The BlockingEvent object, which tracks whether a pg creation is complete
// or still blocking, shouldn't be used across multiple pgs so we can track
// See: ShardServices::get_or_create_pg for similar dependency
// when calling handle_pg_create_info before returning a pg creation future.
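+  // an interrupted (ecanceled) creation is mapped to a null PG reference
+  // below instead of propagating the error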
auto fut = shard_services.create_split_pg(std::move(trigger),
- child_pg->get_pgid()).handle_error(crimson::ct_error::ecanceled::handle([](auto) {
+ child_pg->get_pgid()).handle_error(crimson::ct_error::ecanceled::handle([FNAME](auto) {
+ DEBUG("PG creation canceled");
return seastar::make_ready_future<Ref<PG>>();
}));
co_await shard_services.start_operation<PGAdvanceMap>(