osd: an fmtlib formatter for OpSchedulerItem (52370/head)
author Ronen Friedman <rfriedma@redhat.com>
Sun, 9 Jul 2023 09:41:54 +0000 (04:41 -0500)
committer Ronen Friedman <rfriedma@redhat.com>
Sun, 9 Jul 2023 14:29:29 +0000 (09:29 -0500)
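
Make OpSchedulerItem formattable by fmtlib: add a std::string print()
virtual to OpQueueable alongside the existing ostream print(),
implement it in all concrete queue-item types, and define an
fmt::formatter<> specialization for OpSchedulerItem that matches the
existing operator<<() output. The debug log in
OSD::ShardedOpWQ::_enqueue() now uses fmt::format().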
Signed-off-by: Ronen Friedman <rfriedma@redhat.com>
src/osd/OSD.cc
src/osd/scheduler/OpSchedulerItem.h
src/test/osd/TestMClockScheduler.cc

diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index e9d4b1c886a094dca1a348a7f9dec088c7572b2b..488c22f0a2ad18a7350adaf2b79ad9b1a9fcca62 100644
--- a/src/osd/OSD.cc
+++ b/src/osd/OSD.cc
@@ -11190,7 +11190,7 @@ void OSD::ShardedOpWQ::_enqueue(OpSchedulerItem&& item) {
   OSDShard* sdata = osd->shards[shard_index];
   assert (NULL != sdata);
 
-  dout(20) << __func__ << " " << item << dendl;
+  dout(20) << fmt::format("{} {}", __func__, item) << dendl;
 
   bool empty = true;
   {
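
The rewritten dout line compiles because a user-defined fmt::formatter<>
specialization for OpSchedulerItem is visible here (it is defined at the end
of OpSchedulerItem.h, below). A minimal, self-contained sketch of that fmtlib
mechanism, using an illustrative Item type rather than Ceph code:

    #include <fmt/format.h>
    #include <string>

    // Illustrative type, not Ceph code.
    struct Item {
      std::string name;
      int prio;
    };

    // fmt uses this specialization whenever an Item is passed
    // to a "{}" placeholder.
    template <>
    struct fmt::formatter<Item> {
      constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
      template <typename FormatContext>
      auto format(const Item& it, FormatContext& ctx) const
      {
        return fmt::format_to(ctx.out(), "Item({} prio {})", it.name, it.prio);
      }
    };

    int main()
    {
      Item it{"demo", 63};
      fmt::print("{}\n", it);  // prints: Item(demo prio 63)
    }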
diff --git a/src/osd/scheduler/OpSchedulerItem.h b/src/osd/scheduler/OpSchedulerItem.h
index a9ec14de332c6af4575d38bd5d22374a49fcfa1d..286e23f534519613f4a3707d98f24f7f4a8d86ce 100644
--- a/src/osd/scheduler/OpSchedulerItem.h
+++ b/src/osd/scheduler/OpSchedulerItem.h
@@ -17,7 +17,8 @@
 #include <ostream>
 
 #include "include/types.h"
-#include "include/utime.h"
+#include "include/utime_fmt.h"
+#include "osd/osd_types_fmt.h"
 #include "osd/OpRequest.h"
 #include "osd/PG.h"
 #include "osd/PGPeeringEvent.h"
@@ -71,6 +72,8 @@ public:
     }
 
     virtual std::ostream &print(std::ostream &rhs) const = 0;
+    /// and a version geared towards fmt::format use:
+    virtual std::string print() const = 0;
 
     virtual void run(OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) = 0;
     virtual op_scheduler_class get_scheduler_class() const = 0;
@@ -123,6 +126,8 @@ public:
   OpSchedulerItem &operator=(OpSchedulerItem &&) = default;
   OpSchedulerItem &operator=(const OpSchedulerItem &) = delete;
 
+  friend struct fmt::formatter<OpSchedulerItem>;
+
   uint32_t get_queue_token() const {
     return qitem->get_queue_token();
   }
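
The friend declaration gives the fmt::formatter<OpSchedulerItem>
specialization defined at the end of this header access to the private
qitem and qos_cost members it reads.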
@@ -187,6 +192,7 @@ public:
   }
 }; // class OpSchedulerItem
 
+
 /// Implements boilerplate for operations queued for the pg lock
 class PGOpQueueable : public OpSchedulerItem::OpQueueable {
   spg_t pgid;
@@ -226,6 +232,10 @@ public:
     return rhs << "PGOpItem(op=" << *(op->get_req()) << ")";
   }
 
+  std::string print() const override {
+    return fmt::format("PGOpItem(op={})", *(op->get_req()));
+  }
+
   std::optional<OpRequestRef> maybe_get_op() const final {
     return op;
   }
@@ -250,6 +260,9 @@ public:
   std::ostream &print(std::ostream &rhs) const final {
     return rhs << "PGPeeringEvent(" << evt->get_desc() << ")";
   }
+  std::string print() const final {
+    return fmt::format("PGPeeringEvent({})", evt->get_desc());
+  }
   void run(OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
   bool is_peering() const override {
     return true;
@@ -277,6 +290,10 @@ public:
               << " epoch_queued=" << epoch_queued
               << ")";
   }
+  std::string print() const final {
+    return fmt::format(
+       "PGSnapTrim(pgid={} epoch_queued={})", get_pgid(), epoch_queued);
+  }
   void run(
     OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
   op_scheduler_class get_scheduler_class() const final {
@@ -296,6 +313,10 @@ public:
               << "epoch_queued=" << epoch_queued
               << ")";
   }
+  std::string print() const final {
+    return fmt::format(
+       "PGScrub(pgid={} epoch_queued={})", get_pgid(), epoch_queued);
+  }
   void run(
     OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
   op_scheduler_class get_scheduler_class() const final {
@@ -329,6 +350,11 @@ class PGScrubItem : public PGOpQueueable {
               << "epoch_queued=" << epoch_queued
               << " scrub-token=" << activation_index << ")";
   }
+  std::string print() const override {
+    return fmt::format(
+       "{}(pgid={} epoch_queued={} scrub-token={})", message_name, get_pgid(),
+       epoch_queued, activation_index);
+  }
   void run(OSD* osd,
           OSDShard* sdata,
           PGRef& pg,
@@ -513,6 +539,11 @@ public:
               << " reserved_pushes=" << reserved_pushes
               << ")";
   }
+  std::string print() const final {
+    return fmt::format(
+       "PGRecovery(pgid={} epoch_queued={} reserved_pushes={})", get_pgid(),
+       epoch_queued, reserved_pushes);
+  }
   uint64_t get_reserved_pushes() const final {
     return reserved_pushes;
   }
@@ -540,6 +571,10 @@ public:
               << " c=" << c.get() << " epoch=" << epoch
               << ")";
   }
+  std::string print() const final {
+    return fmt::format(
+       "PGRecoveryContext(pgid={} c={} epoch={})", get_pgid(), (void*)c.get(), epoch);
+  }
   void run(
     OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
   op_scheduler_class get_scheduler_class() const final {
@@ -560,6 +595,10 @@ public:
               << " e" << epoch_queued
               << ")";
   }
+  std::string print() const final {
+    return fmt::format(
+       "PGDelete(pgid={} epoch_queued={})", get_pgid(), epoch_queued);
+  }
   void run(
     OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
   op_scheduler_class get_scheduler_class() const final {
@@ -593,6 +632,10 @@ public:
     return rhs << "PGRecoveryMsg(op=" << *(op->get_req()) << ")";
   }
 
+  std::string print() const final {
+    return fmt::format("PGRecoveryMsg(op={})", *(op->get_req()));
+  }
+
   std::optional<OpRequestRef> maybe_get_op() const final {
     return op;
   }
@@ -601,7 +644,35 @@ public:
     return priority_to_scheduler_class(op->get_req()->get_priority());
   }
 
-  void run(OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
+  void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle)
+      final;
 };
 
-}
+}  // namespace ceph::osd::scheduler
+
+template <>
+struct fmt::formatter<ceph::osd::scheduler::OpSchedulerItem> {
+  constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
+  template <typename FormatContext>
+  auto format(
+      const ceph::osd::scheduler::OpSchedulerItem& opsi,
+      FormatContext& ctx) const
+  {
+    // matching existing op_scheduler_item_t::operator<<() format
+    using class_t =
+       std::underlying_type_t<ceph::osd::scheduler::op_scheduler_class>;
+    const auto qos_cost = opsi.was_queued_via_mclock()
+                             ? fmt::format(" qos_cost {}", opsi.qos_cost)
+                             : "";
+    const auto pushes =
+       opsi.get_reserved_pushes()
+           ? fmt::format(" reserved_pushes {}", opsi.get_reserved_pushes())
+           : "";
+
+    return format_to(
+       ctx.out(), "OpSchedulerItem({} {} class_id {} prio {}{} cost {} e{}{})",
+       opsi.get_ordering_token(), opsi.qitem->print(),
+       static_cast<class_t>(opsi.get_scheduler_class()), opsi.get_priority(),
+       qos_cost, opsi.get_cost(), opsi.get_map_epoch(), pushes);
+  }
+};
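
A note on the design: queue items are held through a pointer to the
OpQueueable base, so instead of specializing fmt::formatter<> for every
concrete item type, a single specialization for the wrapper dispatches
through the new virtual print(). The optional fields are rendered as
conditionally empty substrings (qos_cost only when the item was queued via
mClock, reserved_pushes only when non-zero), so the whole line is produced
by one format_to() call. A hypothetical use, with item construction elided:

    // 'item' is an already-constructed ceph::osd::scheduler::OpSchedulerItem
    std::string line = fmt::format("{}", item);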
diff --git a/src/test/osd/TestMClockScheduler.cc b/src/test/osd/TestMClockScheduler.cc
index 8291da2684a9682ab239bbb67188a0a1d963b3d1..e7bac03d2abd56ad9839f151b7b0c71b335066c3 100644
--- a/src/test/osd/TestMClockScheduler.cc
+++ b/src/test/osd/TestMClockScheduler.cc
@@ -62,6 +62,10 @@ public:
 
     ostream &print(ostream &rhs) const final { return rhs; }
 
+    std::string print() const final {
+      return std::string();
+    }
+
     std::optional<OpRequestRef> maybe_get_op() const final {
       return std::nullopt;
     }