logger().debug("{}: map activated", *this);
if (do_init) {
shard_services.pg_created(pg->get_pgid(), pg);
- shard_services.inc_pg_num();
logger().info("PGAdvanceMap::start new pg {}", *pg);
}
return seastar::when_all_succeed(
unsigned PG::get_target_pg_log_entries() const
{
- const unsigned num_pgs = shard_services.get_pg_num();
- const unsigned target =
- local_conf().get_val<uint64_t>("osd_target_pg_log_entries_per_osd");
+ const unsigned local_num_pgs = shard_services.get_num_local_pgs();
+ const unsigned local_target =
+ local_conf().get_val<uint64_t>("osd_target_pg_log_entries_per_osd") /
+ seastar::smp::count;
const unsigned min_pg_log_entries =
local_conf().get_val<uint64_t>("osd_min_pg_log_entries");
- if (num_pgs > 0 && target > 0) {
+ if (local_num_pgs > 0 && local_target > 0) {
// target an even spread of our budgeted log entries across all
// PGs. note that while we only get to control the entry count
// for primary PGs, we'll normally be responsible for a mix of
// primary and replica PGs (for the same pool(s) even), so this
// will work out.
const unsigned max_pg_log_entries =
local_conf().get_val<uint64_t>("osd_max_pg_log_entries");
- return std::clamp(target / num_pgs,
+ return std::clamp(local_target / local_num_pgs,
min_pg_log_entries,
max_pg_log_entries);
} else {
PG::do_delete_work(ceph::os::Transaction &t, ghobject_t _next)
{
// TODO
- shard_services.dec_pg_num();
return {_next, false};
}
pgs_t& get_pgs() { return pgs; }
const pgs_t& get_pgs() const { return pgs; }
+ /// Number of PGs currently tracked in this (per-shard) map.
+ auto get_pg_count() const { return pgs.size(); }
PGMap() = default;
~PGMap();
};
ShardServices &shard_services) {
return shard_services.load_pg(
pgid
- ).then([pgid, &per_shard_state, &shard_services](auto &&pg) {
+ ).then([pgid, &per_shard_state](auto &&pg) {
logger().info("load_pgs: loaded {}", pgid);
per_shard_state.pg_map.pg_loaded(pgid, std::move(pg));
- shard_services.inc_pg_num();
return seastar::now();
});
});
// TODO: add config to control mapping
PGShardMapping pg_to_shard_mapping{0, 1};
- unsigned num_pgs = 0;
- unsigned get_pg_num() const {
- return num_pgs;
- }
- void inc_pg_num() {
- ++num_pgs;
- }
- void dec_pg_num() {
- --num_pgs;
- }
std::set<pg_t> pg_created;
seastar::future<> send_pg_created(pg_t pgid);
return dispatch_context({}, std::move(ctx));
}
+ /// Return the pg count local to this reactor shard — NOT the
+ /// OSD-wide total.  Callers wanting an osd-wide estimate combine
+ /// this with seastar::smp::count (e.g. get_target_pg_log_entries
+ /// divides the per-OSD budget by the core count), assuming pgs
+ /// are spread roughly evenly across shards.
+ unsigned get_num_local_pgs() const {
+ return local_state.pg_map.get_pg_count();
+ }
+
// OSDMapService
cached_map_t get_map() const final { return local_state.get_osdmap(); }
epoch_t get_up_epoch() const final { return local_state.up_epoch; }
}
FORWARD_TO_OSD_SINGLETON(get_pool_info)
- FORWARD_TO_OSD_SINGLETON(get_pg_num)
FORWARD(with_throttle_while, with_throttle_while, local_state.throttler)
FORWARD_TO_OSD_SINGLETON(osdmap_subscribe)
FORWARD_TO_OSD_SINGLETON(remove_want_pg_temp)
FORWARD_TO_OSD_SINGLETON(requeue_pg_temp)
FORWARD_TO_OSD_SINGLETON(send_pg_created)
- FORWARD_TO_OSD_SINGLETON(inc_pg_num)
- FORWARD_TO_OSD_SINGLETON(dec_pg_num)
FORWARD_TO_OSD_SINGLETON(send_alive)
FORWARD_TO_OSD_SINGLETON(send_pg_temp)
FORWARD_CONST(get_mnow, get_mnow, osd_singleton_state)