namespace ceph::osd::scheduler {
+/* Account one op entering the mclock queue: bump the all-type depth
+ * gauge plus the per-class gauge matching id.class_id.  No-op when the
+ * perf counters were not initialized (init_perfcounter=false passed to
+ * the constructor, or _init_logger() not yet run). */
+void mClockScheduler::_get_mclock_counter(scheduler_id_t id)
+{
+ if (!logger) {
+ return;
+ }
+
+ /* every enqueued op counts toward the all-type queue depth */
+ logger->inc(l_mclock_all_type_queue_len);
+
+ /* ...and toward the depth gauge of its scheduler class */
+ switch (id.class_id) {
+ case op_scheduler_class::immediate:
+ logger->inc(l_mclock_immediate_queue_len);
+ break;
+ case op_scheduler_class::client:
+ logger->inc(l_mclock_client_queue_len);
+ break;
+ case op_scheduler_class::background_recovery:
+ logger->inc(l_mclock_recovery_queue_len);
+ break;
+ case op_scheduler_class::background_best_effort:
+ logger->inc(l_mclock_best_effort_queue_len);
+ break;
+ default:
+ /* NOTE(review): streaming id.class_id (an enum class) and id here
+  * requires operator<< overloads for op_scheduler_class and
+  * scheduler_id_t — confirm both exist, else this won't compile. */
+ derr << __func__ << " unknown class_id=" << id.class_id
+ << " unknown id=" << id << dendl;
+ break;
+ }
+}
+
+/* Account one op leaving the mclock queue: the mirror image of
+ * _get_mclock_counter() — decrement the all-type depth gauge plus the
+ * per-class gauge matching id.class_id.  No-op when perf counters were
+ * not initialized.  Calls must pair 1:1 with _get_mclock_counter() or
+ * the gauges drift. */
+void mClockScheduler::_put_mclock_counter(scheduler_id_t id)
+{
+ if (!logger) {
+ return;
+ }
+
+ /* every dequeued op leaves the all-type queue depth */
+ logger->dec(l_mclock_all_type_queue_len);
+
+ /* ...and leaves the depth gauge of its scheduler class */
+ switch (id.class_id) {
+ case op_scheduler_class::immediate:
+ logger->dec(l_mclock_immediate_queue_len);
+ break;
+ case op_scheduler_class::client:
+ logger->dec(l_mclock_client_queue_len);
+ break;
+ case op_scheduler_class::background_recovery:
+ logger->dec(l_mclock_recovery_queue_len);
+ break;
+ case op_scheduler_class::background_best_effort:
+ logger->dec(l_mclock_best_effort_queue_len);
+ break;
+ default:
+ /* NOTE(review): same operator<< requirement as in
+  * _get_mclock_counter() — confirm op_scheduler_class and
+  * scheduler_id_t are streamable. */
+ derr << __func__ << " unknown class_id=" << id.class_id
+ << " unknown id=" << id << dendl;
+ break;
+ }
+}
+
+/* Build and register the per-shard perf-counter set.  The set is named
+ * after the shard id so multiple op-queue shards are distinguishable in
+ * "perf dump" output.  Must be paired with a matching
+ * PerfCountersCollection::remove() + delete in the destructor. */
+void mClockScheduler::_init_logger()
+{
+ PerfCountersBuilder m(cct, "mclock-shard-queue-" + std::to_string(shard_id),
+ l_mclock_first, l_mclock_last);
+
+ /* These values are incremented on enqueue and decremented on dequeue
+  * (see _get_mclock_counter()/_put_mclock_counter()), i.e. they are
+  * instantaneous queue depths.  Register them as gauges (add_u64), not
+  * as monotonic counters (add_u64_counter): counter-typed entries are
+  * exported as ever-increasing rates, which is wrong for a value that
+  * goes down as well as up. */
+ m.add_u64(l_mclock_immediate_queue_len, "mclock_immediate_queue_len",
+ "high_priority op count in mclock queue");
+ m.add_u64(l_mclock_client_queue_len, "mclock_client_queue_len",
+ "client type op count in mclock queue");
+ m.add_u64(l_mclock_recovery_queue_len, "mclock_recovery_queue_len",
+ "background_recovery type op count in mclock queue");
+ m.add_u64(l_mclock_best_effort_queue_len, "mclock_best_effort_queue_len",
+ "background_best_effort type op count in mclock queue");
+ m.add_u64(l_mclock_all_type_queue_len, "mclock_all_type_queue_len",
+ "all type op count in mclock queue");
+
+ logger = m.create_perf_counters();
+ cct->get_perfcounters_collection()->add(logger);
+
+ /* start all depth gauges at zero */
+ logger->set(l_mclock_immediate_queue_len, 0);
+ logger->set(l_mclock_client_queue_len, 0);
+ logger->set(l_mclock_recovery_queue_len, 0);
+ logger->set(l_mclock_best_effort_queue_len, 0);
+ logger->set(l_mclock_all_type_queue_len, 0);
+}
+
mClockScheduler::mClockScheduler(CephContext *cct,
int whoami,
uint32_t num_shards,
int shard_id,
bool is_rotational,
unsigned cutoff_priority,
- MonClient *monc)
+ MonClient *monc,
+ bool init_perfcounter)
: cct(cct),
whoami(whoami),
num_shards(num_shards),
set_config_defaults_from_profile();
client_registry.update_from_config(
cct->_conf, osd_bandwidth_capacity_per_shard);
+ if (init_perfcounter) {
+ _init_logger();
+ }
}
/* ClientRegistry holds the dmclock::ClientInfo configuration parameters
std::move(item),
id,
cost);
+ _get_mclock_counter(id);
}
dout(20) << __func__ << " client_count: " << scheduler.client_count()
} else {
high_priority[priority].push_front(std::move(item));
}
+
+ scheduler_id_t id = scheduler_id_t {
+ op_scheduler_class::immediate,
+ client_profile_id_t()
+ };
+ _get_mclock_counter(id);
}
WorkItem mClockScheduler::dequeue()
high_priority.erase(iter);
}
ceph_assert(std::get_if<OpSchedulerItem>(&ret));
+
+ scheduler_id_t id = scheduler_id_t {
+ op_scheduler_class::immediate,
+ client_profile_id_t()
+ };
+ _put_mclock_counter(id);
return ret;
} else {
mclock_queue_t::PullReq result = scheduler.pull_request();
ceph_assert(result.is_retn());
auto &retn = result.get_retn();
+ _put_mclock_counter(retn.client);
return std::move(*retn.request);
}
}
mClockScheduler::~mClockScheduler()
{
cct->_conf.remove_observer(this);
+ if (logger) {
+ /* _init_logger() registered this PerfCounters instance with the
+  * cct-wide collection; deregister it before freeing, otherwise the
+  * collection keeps a dangling pointer that the next "perf dump"
+  * would dereference (use-after-free). */
+ cct->get_perfcounters_collection()->remove(logger);
+ delete logger;
+ logger = nullptr;
+ }
}
}
#include "osd/scheduler/OpSchedulerItem.h"
+/* Perf-counter indices for the per-shard mclock queue-depth stats.
+ * l_mclock_first/l_mclock_last delimit the index range handed to
+ * PerfCountersBuilder; the entries between them must stay in sync with
+ * the counters registered in mClockScheduler::_init_logger(). */
+enum {
+ l_mclock_first = 15000,
+ l_mclock_immediate_queue_len,
+ l_mclock_client_queue_len,
+ l_mclock_recovery_queue_len,
+ l_mclock_best_effort_queue_len,
+ l_mclock_all_type_queue_len,
+ l_mclock_last,
+};
+
namespace ceph::osd::scheduler {
constexpr double default_min = 0.0;
const bool is_rotational;
const unsigned cutoff_priority;
MonClient *monc;
+ PerfCounters *logger = nullptr;
/**
* osd_bandwidth_cost_per_io
public:
mClockScheduler(CephContext *cct, int whoami, uint32_t num_shards,
int shard_id, bool is_rotational, unsigned cutoff_priority,
- MonClient *monc);
+ MonClient *monc, bool init_perfcounter=true);
~mClockScheduler() override;
/// Calculate scaled cost per item
private:
// Enqueue the op to the high priority queue
void enqueue_high(unsigned prio, OpSchedulerItem &&item, bool front = false);
+ void _init_logger();
+ void _get_mclock_counter(scheduler_id_t id);
+ void _put_mclock_counter(scheduler_id_t id);
};
}