osd: adding PerfCounters for backoff throttle 13017/head
author     chuanhong.wang <chuanhong.wang@163.com>
Fri, 20 Jan 2017 05:59:08 +0000 (13:59 +0800)
committer  chuanhong.wang <chuanhong.wang@163.com>
Mon, 23 Jan 2017 05:06:55 +0000 (13:06 +0800)
Signed-off-by: Chuanhong Wang <chuanhong.wang@163.com>
src/common/Throttle.cc
src/common/Throttle.h
src/os/filestore/FileStore.cc
src/os/filestore/JournalThrottle.h
src/test/common/Throttle.cc

diff --git a/src/common/Throttle.cc b/src/common/Throttle.cc
index 64aec5925618f70f6839e57861adafce2b4eae1c..56579ca8dd18f14411ad08e1c1cb9181c253db98 100644 (file)
@@ -251,6 +251,57 @@ void Throttle::reset()
   }
 }
 
+enum {
+  l_backoff_throttle_first = l_throttle_last + 1,
+  l_backoff_throttle_val,
+  l_backoff_throttle_max,
+  l_backoff_throttle_get,
+  l_backoff_throttle_get_sum,
+  l_backoff_throttle_take,
+  l_backoff_throttle_take_sum,
+  l_backoff_throttle_put,
+  l_backoff_throttle_put_sum,
+  l_backoff_throttle_wait,
+  l_backoff_throttle_last,
+};
+
+BackoffThrottle::BackoffThrottle(CephContext *cct, const std::string& n, unsigned expected_concurrency, bool _use_perf)
+  : cct(cct), name(n), logger(NULL),
+    conds(expected_concurrency), ///< [in] determines size of conds
+    use_perf(_use_perf)
+{
+  if (!use_perf)
+    return;
+
+  if (cct->_conf->throttler_perf_counter) {
+    PerfCountersBuilder b(cct, string("throttle-") + name, l_backoff_throttle_first, l_backoff_throttle_last);
+    b.add_u64(l_backoff_throttle_val, "val", "Currently available throttle");
+    b.add_u64(l_backoff_throttle_max, "max", "Max value for throttle");
+    b.add_u64_counter(l_backoff_throttle_get, "get", "Gets");
+    b.add_u64_counter(l_backoff_throttle_get_sum, "get_sum", "Got data");
+    b.add_u64_counter(l_backoff_throttle_take, "take", "Takes");
+    b.add_u64_counter(l_backoff_throttle_take_sum, "take_sum", "Taken data");
+    b.add_u64_counter(l_backoff_throttle_put, "put", "Puts");
+    b.add_u64_counter(l_backoff_throttle_put_sum, "put_sum", "Put data");
+    b.add_time_avg(l_backoff_throttle_wait, "wait", "Waiting latency");
+
+    logger = b.create_perf_counters();
+    cct->get_perfcounters_collection()->add(logger);
+    logger->set(l_backoff_throttle_max, max);
+  }
+}
+
+BackoffThrottle::~BackoffThrottle()
+{
+  if (!use_perf)
+    return;
+
+  if (logger) {
+    cct->get_perfcounters_collection()->remove(logger);
+    delete logger;
+  }
+}
+
 bool BackoffThrottle::set_params(
   double _low_threshhold,
   double _high_threshhold,
@@ -332,6 +383,9 @@ bool BackoffThrottle::set_params(
   max_delay_per_count = _max_multiple / _expected_throughput;
   max = _throttle_max;
 
+  if (logger)
+    logger->set(l_backoff_throttle_max, max);
+
   if (high_threshhold - low_threshhold > 0) {
     s0 = high_delay_per_count / (high_threshhold - low_threshhold);
   } else {
@@ -373,18 +427,31 @@ std::chrono::duration<double> BackoffThrottle::get(uint64_t c)
   locker l(lock);
   auto delay = _get_delay(c);
 
+  if (logger) {
+    logger->inc(l_backoff_throttle_get);
+    logger->inc(l_backoff_throttle_get_sum, c);
+  }
+
   // fast path
   if (delay == std::chrono::duration<double>(0) &&
       waiters.empty() &&
       ((max == 0) || (current == 0) || ((current + c) <= max))) {
     current += c;
+
+    if (logger) {
+      logger->set(l_backoff_throttle_val, current);
+    }
+
     return std::chrono::duration<double>(0);
   }
 
   auto ticket = _push_waiter();
+  utime_t wait_from = ceph_clock_now();
+  bool waited = false;
 
   while (waiters.begin() != ticket) {
     (*ticket)->wait(l);
+    waited = true;
   }
 
   auto start = std::chrono::system_clock::now();
@@ -392,8 +459,10 @@ std::chrono::duration<double> BackoffThrottle::get(uint64_t c)
   while (true) {
     if (!((max == 0) || (current == 0) || (current + c) <= max)) {
       (*ticket)->wait(l);
+      waited = true;
     } else if (delay > std::chrono::duration<double>(0)) {
       (*ticket)->wait_for(l, delay);
+      waited = true;
     } else {
       break;
     }
@@ -404,6 +473,14 @@ std::chrono::duration<double> BackoffThrottle::get(uint64_t c)
   _kick_waiters();
 
   current += c;
+
+  if (logger) {
+    logger->set(l_backoff_throttle_val, current);
+    if (waited) {
+      logger->tinc(l_backoff_throttle_wait, ceph_clock_now() - wait_from);
+    }
+  }
+
   return std::chrono::system_clock::now() - start;
 }
 
@@ -413,6 +490,13 @@ uint64_t BackoffThrottle::put(uint64_t c)
   assert(current >= c);
   current -= c;
   _kick_waiters();
+
+  if (logger) {
+    logger->inc(l_backoff_throttle_put);
+    logger->inc(l_backoff_throttle_put_sum, c);
+    logger->set(l_backoff_throttle_val, current);
+  }
+
   return current;
 }
 
@@ -420,6 +504,13 @@ uint64_t BackoffThrottle::take(uint64_t c)
 {
   locker l(lock);
   current += c;
+
+  if (logger) {
+    logger->inc(l_backoff_throttle_take);
+    logger->inc(l_backoff_throttle_take_sum, c);
+    logger->set(l_backoff_throttle_val, current);
+  }
+
   return current;
 }
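
For readers unfamiliar with the PerfCounters API used above, a brief aside (not part of the patch): add_u64() registers a gauge updated with set(), add_u64_counter() a monotonically increasing counter updated with inc(), and add_time_avg() a count/total-time pair updated with tinc(), from which consumers of the stats derive an average wait. The sketch below is illustrative only; the PerfCounters calls are the API the patch uses, but the helper function and the sample values are made up, and it assumes the file-local l_backoff_throttle_* enum defined above.

  // Illustrative sketch only: the three update styles used by the new counters.
  #include "common/perf_counters.h"
  #include "include/utime.h"

  static void touch_backoff_counters(PerfCounters *logger)
  {
    if (!logger)
      return;                                      // counters are optional
    logger->set(l_backoff_throttle_val, 4096);     // gauge (add_u64): current level
    logger->inc(l_backoff_throttle_get);           // counter (add_u64_counter): one get()
    logger->inc(l_backoff_throttle_get_sum, 4096); // counter: amount requested
    logger->tinc(l_backoff_throttle_wait,
                 utime_t(0, 500000));              // time-avg: one 0.5 ms wait sample
  }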
 
diff --git a/src/common/Throttle.h b/src/common/Throttle.h
index 451e172b0beac6c0f98920d1ff979200b53b5ae7..96768aadccf434e0b7161dc8b6e20723b009c0c6 100644 (file)
@@ -146,6 +146,10 @@ public:
  * delay = e + (r - h)((m - e)/(1 - h))
  */
 class BackoffThrottle {
+  CephContext *cct;
+  const std::string name;
+  PerfCounters *logger;
+
   std::mutex lock;
   using locker = std::unique_lock<std::mutex>;
 
@@ -154,6 +158,8 @@ class BackoffThrottle {
   /// allocated once to avoid constantly allocating new ones
   vector<std::condition_variable> conds;
 
+  const bool use_perf;
+
   /// pointers into conds
   list<std::condition_variable*> waiters;
 
@@ -213,9 +219,10 @@ public:
   uint64_t get_current();
   uint64_t get_max();
 
-  BackoffThrottle(
-    unsigned expected_concurrency ///< [in] determines size of conds
-    ) : conds(expected_concurrency) {}
+  BackoffThrottle(CephContext *cct, const std::string& n,
+    unsigned expected_concurrency, ///< [in] determines size of conds
+    bool _use_perf = true);
+  ~BackoffThrottle();
 };
 
 
diff --git a/src/os/filestore/FileStore.cc b/src/os/filestore/FileStore.cc
index 21cc1823d9910796081eeb8644c65d3ce2a89114..8a0c75206d87b3c9a9f4fabdd03f5da27949ccf4 100644 (file)
@@ -539,8 +539,8 @@ FileStore::FileStore(CephContext* cct, const std::string &base,
   next_osr_id(0),
   m_disable_wbthrottle(cct->_conf->filestore_odsync_write ||
                       !cct->_conf->filestore_wbthrottle_enable),
-  throttle_ops(cct->_conf->filestore_caller_concurrency),
-  throttle_bytes(cct->_conf->filestore_caller_concurrency),
+  throttle_ops(cct, "filestore_ops", cct->_conf->filestore_caller_concurrency),
+  throttle_bytes(cct, "filestore_bytes", cct->_conf->filestore_caller_concurrency),
   m_ondisk_finisher_num(cct->_conf->filestore_ondisk_finisher_threads),
   m_apply_finisher_num(cct->_conf->filestore_apply_finisher_threads),
   op_tp(cct, "FileStore::op_tp", "tp_fstore_op", cct->_conf->filestore_op_threads, "filestore_op_threads"),
diff --git a/src/os/filestore/JournalThrottle.h b/src/os/filestore/JournalThrottle.h
index 8a7ce7261072c7e82665618d6ded7cf4e0951e26..75485d6d8f30461fc51ca5950dcc9b01e853f784 100644 (file)
@@ -95,7 +95,7 @@ public:
 
   JournalThrottle(
     unsigned expected_concurrency ///< [in] determines size of conds
-    ) : throttle(expected_concurrency) {}
+    ) : throttle(g_ceph_context, "filestore_journal", expected_concurrency) {}
 };
 
 #endif
diff --git a/src/test/common/Throttle.cc b/src/test/common/Throttle.cc
index 000fb83a682b62cf4ef6b08c6d40b3a2adbd2c7e..99667f9a6cca48bc517331eb8735ca562c033362 100644 (file)
@@ -278,7 +278,7 @@ std::pair<double, std::chrono::duration<double> > test_backoff(
   uint64_t total_observed_total = 0;
   uint64_t total_observations = 0;
 
-  BackoffThrottle throttle(5);
+  BackoffThrottle throttle(g_ceph_context, "backoff_throttle_test", 5);
   bool valid = throttle.set_params(
     low_threshhold,
     high_threshhold,
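
From a caller's perspective, a hedged usage sketch of the new constructor (not part of the patch; the throttle name, concurrency and unit counts are arbitrary example values). When the throttler_perf_counter option is enabled, the counters registered by this constructor should show up in the daemon's perf dump under a section named after the throttle, here "throttle-example_backoff", since the constructor prefixes the given name with "throttle-".

  // Sketch only: constructing a BackoffThrottle with the new signature and
  // exercising it so the get/get_sum/put/put_sum/val counters move.
  #include "common/Throttle.h"
  #include "common/ceph_context.h"

  void use_backoff_throttle(CephContext *cct)
  {
    BackoffThrottle throttle(cct, "example_backoff",
                             8 /* expected_concurrency */);

    // Acquire 4096 units; this may block according to the backoff parameters
    // configured via set_params(), and any time spent blocked is charged to
    // the new "wait" counter.
    throttle.get(4096);

    // ... do the throttled work ...

    // Release the units: "val" drops back, "put" and "put_sum" increase.
    throttle.put(4096);
  }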