Add a new function, get_cost_per_io(), to expose osd_bandwidth_cost_per_io. The intention is to use this value for better cost estimation of operations being queued in the mClock scheduler.
Signed-off-by: Aishwarya Mathuria <amathuri@redhat.com>
osd->op_shardedwq.queue_front(std::move(qi));
}
+// Expose the scheduler's per-IO bandwidth cost (osd_bandwidth_cost_per_io)
+// by delegating to the sharded work queue. Only meaningful for the mClock
+// scheduler; asserts if the configured scheduler is wpq (see OpScheduler).
+double OSDService::get_cost_per_io() const
+{
+ return osd->op_shardedwq.get_cost_per_io();
+}
+
void OSDService::queue_recovery_context(
PG *pg,
GenContext<ThreadPool::TPHandle&> *c,
void enqueue_back(OpSchedulerItem&& qi);
void enqueue_front(OpSchedulerItem&& qi);
+ /// Scheduler cost per io (osd_bandwidth_cost_per_io); only valid for
+ /// mclock — the wpq default implementation asserts if called.
+ double get_cost_per_io() const;
void maybe_inject_dispatch_delay() {
if (g_conf()->osd_debug_inject_dispatch_delay_probability > 0) {
p->complete(0);
}
}
+
+ // Fetch the per-io cost from shard 0's scheduler. The value derives from
+ // global OSD config, so any shard would return the same number; assumes
+ // at least one shard exists — NOTE(review): confirm shards is never
+ // empty at any call site.
+ double get_cost_per_io() const {
+ auto &sdata = osd->shards[0];
+ return sdata->scheduler->get_cost_per_io();
+ }
} op_shardedwq;
#include "mon/MonClient.h"
#include "osd/scheduler/OpSchedulerItem.h"
+#include "include/ceph_assert.h"
+
namespace ceph::osd::scheduler {
using client = uint64_t;
// Get the scheduler type set for the queue
virtual op_queue_type_t get_type() const = 0;
+ // Default for schedulers that have no per-io bandwidth cost (i.e. wpq):
+ // calling this aborts via ceph_assert, so callers must ensure the active
+ // scheduler is mclock (e.g. via get_type()) before invoking it.
+ virtual double get_cost_per_io() const {
+ ceph_assert(0 == "impossible for wpq");
+ return 0.0;
+ }
+
// Destructor
virtual ~OpScheduler() {};
};
const char** get_tracked_conf_keys() const final;
void handle_conf_change(const ConfigProxy& conf,
const std::set<std::string> &changed) final;
+
+ // Expose the precomputed osd_bandwidth_cost_per_io so callers can use it
+ // for cost estimation of operations queued in the mClock scheduler.
+ double get_cost_per_io() const {
+ return osd_bandwidth_cost_per_io;
+ }
private:
// Enqueue the op to the high priority queue
void enqueue_high(unsigned prio, OpSchedulerItem &&item, bool front = false);