osd: Display scheduler specific info when dumping an OpSchedulerItem 44355/head
author     Sridhar Seshasayee <sseshasa@redhat.com>
           Fri, 17 Dec 2021 12:30:05 +0000 (18:00 +0530)
committer  Sridhar Seshasayee <sseshasa@redhat.com>
           Tue, 11 Jan 2022 04:30:14 +0000 (10:00 +0530)
Implement logic to dump information relevant to the scheduler type in use
when dumping details about an OpSchedulerItem. For example, the 'priority'
field is relevant for the 'wpq' scheduler, whereas for the
'mclock_scheduler' the 'qos_cost' field is more informative when debugging.

Two additional fields, 'qos_cost' and 'qos_item', are introduced in the
OpSchedulerItem class. They are mainly used to decide which information is
dumped depending on the scheduler type. The points of interest are when an
item is enqueued and dequeued.
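
The dequeue-side update of 'qos_cost' is not part of this change; a minimal
sketch of how the mclock scheduler might record it (the helper and the
'scaled_cost' variable are hypothetical, only the accessors added below are
real) could look like:

  // Hypothetical sketch -- the actual call site lives in the mclock
  // scheduler code and is not part of this patch.
  void record_qos_cost(OpSchedulerItem& item, int scaled_cost) {
    if (item.is_qos_item()) {          // only true for non-immediate requests
      item.set_qos_cost(scaled_cost);  // printed later as " qos_cost <n>"
    }
  }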

For the 'mclock_scheduler', the 'class_id' and 'qos_cost' fields are dumped
during the enqueue and dequeue operations respectively. For the 'wpq'
scheduler, the output remains the same as before.
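
For instance, with illustrative values, the dump at debug level 20 would
look roughly as follows (qos_cost is only printed once it has been set,
i.e. at dequeue time):

  mclock_scheduler, enqueue:
    OpSchedulerItem(3.1f PGOpItem(op=...) class_id client cost 4096 e345)
  mclock_scheduler, dequeue:
    OpSchedulerItem(3.1f PGOpItem(op=...) class_id client qos_cost 8192 cost 4096 e345)
  wpq (unchanged):
    OpSchedulerItem(3.1f PGOpItem(op=...) prio 63 cost 4096 e345)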

An additional benefit of this change is that the debug messages now make it
immediately apparent which scheduler type a given shard is using.

Signed-off-by: Sridhar Seshasayee <sseshasa@redhat.com>
src/osd/OSD.cc
src/osd/OSD.h
src/osd/scheduler/OpSchedulerItem.h

index b54560af44220f186b0d96c3fd842d534a7bbb5d..4a6f63e44788dcd84b3f54f5f9693ada8f240de1 100644 (file)
@@ -10608,6 +10608,13 @@ void OSDShard::update_scheduler_config()
   scheduler->update_configuration();
 }
 
+std::string OSDShard::get_scheduler_type()
+{
+  std::ostringstream scheduler_type;
+  scheduler_type << *scheduler;
+  return scheduler_type.str();
+}
+
 OSDShard::OSDShard(
   int id,
   CephContext *cct,
@@ -10996,10 +11003,13 @@ void OSD::ShardedOpWQ::_enqueue(OpSchedulerItem&& item) {
   uint32_t shard_index =
     item.get_ordering_token().hash_to_shard(osd->shards.size());
 
-  dout(20) << __func__ << " " << item << dendl;
-
   OSDShard* sdata = osd->shards[shard_index];
   assert (NULL != sdata);
+  if (sdata->get_scheduler_type() == "mClockScheduler") {
+    item.maybe_set_is_qos_item();
+  }
+
+  dout(20) << __func__ << " " << item << dendl;
 
   bool empty = true;
   {
index 1914bc1b57fa7081188fcfdc9253a11d10e48f19..472a0805b4152cc97350046a9c73f16a3e9f70f0 100644 (file)
@@ -1061,6 +1061,7 @@ struct OSDShard {
   void register_and_wake_split_child(PG *pg);
   void unprime_split_children(spg_t parent, unsigned old_pg_num);
   void update_scheduler_config();
+  std::string get_scheduler_type();
 
   OSDShard(
     int id,
index a70e9820b6e5642fec5d10e88d75131b01a8857d..68ee0d734c228416102e6a77305a6be8f6c26510 100644 (file)
@@ -106,6 +106,8 @@ private:
   utime_t start_time;
   uint64_t owner;  ///< global id (e.g., client.XXX)
   epoch_t map_epoch;    ///< an epoch we expect the PG to exist in
+  int qos_cost;  ///< scaled cost calculated by the mclock scheduler
+  bool qos_item;  ///< set to true if item is scheduled by mclock scheduler
 
 public:
   OpSchedulerItem(
@@ -121,7 +123,7 @@ public:
       start_time(start_time),
       owner(owner),
       map_epoch(e)
-  {}
+  { qos_cost = 0; qos_item = false; }
   OpSchedulerItem(OpSchedulerItem &&) = default;
   OpSchedulerItem(const OpSchedulerItem &) = delete;
   OpSchedulerItem &operator=(OpSchedulerItem &&) = default;
@@ -171,15 +173,45 @@ public:
     return qitem->get_scheduler_class();
   }
 
+  void maybe_set_is_qos_item() {
+    if (get_scheduler_class() != op_scheduler_class::immediate) {
+      qos_item = true;
+    }
+  }
+
+  bool is_qos_item() const {
+    return qos_item;
+  }
+
+  void set_qos_cost(int scaled_cost) {
+    qos_cost = scaled_cost;
+  }
+
+  int get_qos_cost() const {
+    return qos_cost;
+  }
+
   friend std::ostream& operator<<(std::ostream& out, const OpSchedulerItem& item) {
-     out << "OpSchedulerItem("
-        << item.get_ordering_token() << " " << *item.qitem
-        << " prio " << item.get_priority()
-        << " cost " << item.get_cost()
-        << " e" << item.get_map_epoch();
-     if (item.get_reserved_pushes()) {
-       out << " reserved_pushes " << item.get_reserved_pushes();
-     }
+    out << "OpSchedulerItem("
+        << item.get_ordering_token() << " " << *item.qitem;
+
+    if (item.is_qos_item()) {
+      out << " class_id " << item.get_scheduler_class();
+    } else {
+      out << " prio " << item.get_priority();
+    }
+
+    if (item.get_qos_cost()) {
+      out << " qos_cost " << item.get_qos_cost();
+    }
+
+    out << " cost " << item.get_cost()
+        << " e" << item.get_map_epoch();
+
+    if (item.get_reserved_pushes()) {
+      out << " reserved_pushes " << item.get_reserved_pushes();
+    }
+
     return out << ")";
   }
 }; // class OpSchedulerItem