From a875a76b8d6bcbdba39300e190c45f0e330b2fb9 Mon Sep 17 00:00:00 2001 From: Sridhar Seshasayee Date: Fri, 17 Dec 2021 18:00:05 +0530 Subject: [PATCH] osd: Display scheduler specific info when dumping an OpSchedulerItem Implement logic to dump information relevant to the scheduler type being employed when dumping details about an OpSchedulerItem. For example, the 'priority' field is relevant for the 'wpq' scheduler, but for the 'mclock_scheduler', the 'qos_cost' gives more information during debugging. A couple of additional fields called 'qos_cost' and 'qos_item' are introduced in the OpSchedulerItem class. These are mainly used to facilitate dumping of relevant information depending on the scheduler type. The interesting points are when an item is enqueued and dequeued. For the 'mclock_scheduler', the 'class_id' and the 'qos_cost' fields are dumped during the enqueue and dequeue operations, respectively. For the 'wpq' scheduler things remain the same as before. An additional benefit of this change is to help immediately identify the type of scheduler being used for a given shard depending on what is dumped in the debug messages. 
Signed-off-by: Sridhar Seshasayee --- src/osd/OSD.cc | 14 ++++++-- src/osd/OSD.h | 1 + src/osd/scheduler/OpSchedulerItem.h | 50 +++++++++++++++++++++++------ 3 files changed, 54 insertions(+), 11 deletions(-) diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc index b54560af442..4a6f63e4478 100644 --- a/src/osd/OSD.cc +++ b/src/osd/OSD.cc @@ -10608,6 +10608,13 @@ void OSDShard::update_scheduler_config() scheduler->update_configuration(); } +std::string OSDShard::get_scheduler_type() +{ + std::ostringstream scheduler_type; + scheduler_type << *scheduler; + return scheduler_type.str(); +} + OSDShard::OSDShard( int id, CephContext *cct, @@ -10996,10 +11003,13 @@ void OSD::ShardedOpWQ::_enqueue(OpSchedulerItem&& item) { uint32_t shard_index = item.get_ordering_token().hash_to_shard(osd->shards.size()); - dout(20) << __func__ << " " << item << dendl; - OSDShard* sdata = osd->shards[shard_index]; assert (NULL != sdata); + if (sdata->get_scheduler_type() == "mClockScheduler") { + item.maybe_set_is_qos_item(); + } + + dout(20) << __func__ << " " << item << dendl; bool empty = true; { diff --git a/src/osd/OSD.h b/src/osd/OSD.h index 1914bc1b57f..472a0805b41 100644 --- a/src/osd/OSD.h +++ b/src/osd/OSD.h @@ -1061,6 +1061,7 @@ struct OSDShard { void register_and_wake_split_child(PG *pg); void unprime_split_children(spg_t parent, unsigned old_pg_num); void update_scheduler_config(); + std::string get_scheduler_type(); OSDShard( int id, diff --git a/src/osd/scheduler/OpSchedulerItem.h b/src/osd/scheduler/OpSchedulerItem.h index a70e9820b6e..68ee0d734c2 100644 --- a/src/osd/scheduler/OpSchedulerItem.h +++ b/src/osd/scheduler/OpSchedulerItem.h @@ -106,6 +106,8 @@ private: utime_t start_time; uint64_t owner; ///< global id (e.g., client.XXX) epoch_t map_epoch; ///< an epoch we expect the PG to exist in + int qos_cost; ///< scaled cost calculated by the mclock scheduler + bool qos_item; ///< set to true if item is scheduled by mclock scheduler public: OpSchedulerItem( @@ -121,7 +123,7 @@ 
public: start_time(start_time), owner(owner), map_epoch(e) - {} + { qos_cost = 0; qos_item = false; } OpSchedulerItem(OpSchedulerItem &&) = default; OpSchedulerItem(const OpSchedulerItem &) = delete; OpSchedulerItem &operator=(OpSchedulerItem &&) = default; @@ -171,15 +173,45 @@ public: return qitem->get_scheduler_class(); } + void maybe_set_is_qos_item() { + if (get_scheduler_class() != op_scheduler_class::immediate) { + qos_item = true ; + } + } + + bool is_qos_item() const { + return qos_item; + } + + void set_qos_cost(int scaled_cost) { + qos_cost = scaled_cost; + } + + int get_qos_cost() const { + return qos_cost; + } + friend std::ostream& operator<<(std::ostream& out, const OpSchedulerItem& item) { - out << "OpSchedulerItem(" - << item.get_ordering_token() << " " << *item.qitem - << " prio " << item.get_priority() - << " cost " << item.get_cost() - << " e" << item.get_map_epoch(); - if (item.get_reserved_pushes()) { - out << " reserved_pushes " << item.get_reserved_pushes(); - } + out << "OpSchedulerItem(" + << item.get_ordering_token() << " " << *item.qitem; + + if (item.is_qos_item()) { + out << " class_id " << item.get_scheduler_class(); + } else { + out << " prio " << item.get_priority(); + } + + if (item.get_qos_cost()) { + out << " qos_cost " << item.get_qos_cost(); + } + + out << " cost " << item.get_cost() + << " e" << item.get_map_epoch(); + + if (item.get_reserved_pushes()) { + out << " reserved_pushes " << item.get_reserved_pushes(); + } + return out << ")"; } }; // class OpSchedulerItem -- 2.39.5