git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
mon: show cache tier IO rate in 'osd pool stats'
authorZhiqiang Wang <zhiqiang.wang@intel.com>
Fri, 17 Jul 2015 02:38:43 +0000 (10:38 +0800)
committerZhiqiang Wang <zhiqiang.wang@intel.com>
Mon, 20 Jul 2015 02:51:58 +0000 (10:51 +0800)
The cache tier IO rate includes the flush/evict bandwidth and the
promote IOPS.

Signed-off-by: Zhiqiang Wang <zhiqiang.wang@intel.com>
src/mon/OSDMonitor.cc
src/mon/PGMap.cc
src/mon/PGMap.h

index 84545bff402033d3277f81ca5e99e54b4f15de36..4ac7a111b62e668c30f0b8c827949cb7a5e32d43 100644 (file)
@@ -3597,6 +3597,22 @@ bool OSDMonitor::preprocess_command(MMonCommand *m)
       if (!f && !rss.str().empty())
         tss << "  client io " << rss.str() << "\n";
 
+      // dump cache tier IO rate for cache pool
+      const pg_pool_t *pool = osdmap.get_pg_pool(poolid);
+      if (pool->is_tier()) {
+        if (f) {
+          f->close_section();
+          f->open_object_section("cache_io_rate");
+        }
+
+        rss.clear();
+        rss.str("");
+
+        pg_map.pool_cache_io_rate_summary(f.get(), &rss, poolid);
+        if (!f && !rss.str().empty())
+          tss << "  cache tier io " << rss.str() << "\n";
+      }
+
       if (f) {
         f->close_section();
         f->close_section();
index ada4db92fd03025d8a03e20a52f591b6799f540f..261dd71fb5ea25d45c651d83ba2aa7bf4d49d0aa 100644 (file)
@@ -1188,6 +1188,55 @@ void PGMap::pool_client_io_rate_summary(Formatter *f, ostream *out,
   client_io_rate_summary(f, out, p->second.first, ts->second);
 }
 
+void PGMap::cache_io_rate_summary(Formatter *f, ostream *out,
+                                  const pool_stat_t& delta_sum,
+                                  utime_t delta_stamp) const
+{
+  pool_stat_t pos_delta = delta_sum;
+  pos_delta.floor(0);
+  if (pos_delta.stats.sum.num_flush ||
+      pos_delta.stats.sum.num_evict ||
+      pos_delta.stats.sum.num_promote) {
+    if (pos_delta.stats.sum.num_flush) {
+      int64_t flush = (pos_delta.stats.sum.num_flush_kb << 10) / (double)delta_stamp;
+      if (f) {
+       f->dump_int("flush_bytes_sec", flush);
+      } else {
+       *out << pretty_si_t(flush) << "B/s flush";
+      }
+    }
+    if (pos_delta.stats.sum.num_evict) {
+      int64_t evict = (pos_delta.stats.sum.num_evict_kb << 10) / (double)delta_stamp;
+      if (f) {
+       f->dump_int("evict_bytes_sec", evict);
+      } else {
+       *out << ", " << pretty_si_t(evict) << "B/s evict";
+      }
+    }
+    if (pos_delta.stats.sum.num_promote) {
+      int64_t promote = pos_delta.stats.sum.num_promote / (double)delta_stamp;
+      if (f) {
+        f->dump_int("promote_op_per_sec", promote);
+      } else {
+        *out << ", " << pretty_si_t(promote) << "op/s promote";
+      }
+    }
+  }
+}
+
+void PGMap::pool_cache_io_rate_summary(Formatter *f, ostream *out,
+                                       uint64_t poolid) const
+{
+  ceph::unordered_map<uint64_t,pair<pool_stat_t,utime_t> >::const_iterator p =
+    per_pool_sum_delta.find(poolid);
+  if (p == per_pool_sum_delta.end())
+    return;
+  ceph::unordered_map<uint64_t,utime_t>::const_iterator ts =
+    per_pool_sum_deltas_stamps.find(p->first);
+  assert(ts != per_pool_sum_deltas_stamps.end());
+  cache_io_rate_summary(f, out, p->second.first, ts->second);
+}
+
 /**
  * update aggregated delta
  *
index 733b2f0ebcbdf4b26b6f42d0ae515c6c43e94985..b89e52a5c3441f91080f07a0237daa2fb17459ca 100644 (file)
@@ -334,6 +334,20 @@ public:
    */
   void pool_client_io_rate_summary(Formatter *f, ostream *out,
                                    uint64_t poolid) const;
+  /**
+   * Obtain a formatted/plain output for cache tier IO, sourced from stats for a
+   * given @p delta_sum pool over a given @p delta_stamp period of time.
+   */
+  void cache_io_rate_summary(Formatter *f, ostream *out,
+                             const pool_stat_t& delta_sum,
+                             utime_t delta_stamp) const;
+  /**
+   * Obtain a formatted/plain output for cache tier IO over a given pool
+   * with id @p poolid.  We will then obtain pool-specific data
+   * from @p per_pool_sum_delta.
+   */
+  void pool_cache_io_rate_summary(Formatter *f, ostream *out,
+                                  uint64_t poolid) const;
 
   void print_summary(Formatter *f, ostream *out) const;
   void print_oneline_summary(Formatter *f, ostream *out) const;