common/ceph_common: add heartbeat perf counters unconditionally
author     Sage Weil <sage@redhat.com>
           Thu, 4 May 2017 20:35:47 +0000 (15:35 -0500)
committer  Sage Weil <sage@redhat.com>
           Mon, 29 Oct 2018 18:19:23 +0000 (13:19 -0500)
rgw and rbd_mirror were enabling a perf counter that monitors the
heartbeat map.  Enable it unconditionally for all ccts.  This is simpler
and more useful!

Add a CINIT_FLAG to avoid starting the counters.  It is only used by the
unit tests, which do not expect the default cct counters and which we
don't want to break every time a new cct counter is added.  That means
removing the CephContextPerfCounters test.
Signed-off-by: Sage Weil <sage@redhat.com>
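
For illustration only (not part of this commit), here is a minimal sketch of a
test-style main() that opts out of the default cct counters with the new flag.
It mirrors the src/test/perf_counters.cc hunk below; argv_to_vec() and
global_init() are the existing helpers from common/ceph_argparse.h and
global/global_init.h.

    #include <map>
    #include <string>
    #include <vector>

    #include "common/ceph_argparse.h"
    #include "common/common_init.h"
    #include "global/global_context.h"
    #include "global/global_init.h"

    int main(int argc, const char **argv) {
      std::map<std::string, std::string> defaults;  // no config overrides
      std::vector<const char*> args;
      argv_to_vec(argc, argv, args);

      // CINIT_FLAG_NO_CCT_PERF_COUNTERS keeps start_service_thread() from
      // registering the "cct" counters, so a perf dump only shows whatever
      // counters the test itself adds.
      auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT,
                             CODE_ENVIRONMENT_UTILITY,
                             CINIT_FLAG_NO_DEFAULT_CONFIG_FILE |
                             CINIT_FLAG_NO_CCT_PERF_COUNTERS);
      common_init_finish(g_ceph_context);
      return 0;
    }
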
src/common/ceph_context.cc
src/common/ceph_context.h
src/common/common_init.h
src/rgw/rgw_main.cc
src/test/perf_counters.cc
src/tools/rbd_mirror/main.cc

src/common/ceph_context.cc
index 81245039fcf0f62299b021d60e33c20817a947fc..1c19cd71af406ff182c99f1257c348a9199b7148 100644 (file)
 using ceph::bufferlist;
 using ceph::HeartbeatMap;
 
+// for CINIT_FLAGS
+#include "common/common_init.h"
+
+#include <iostream>
+#include <pthread.h>
+
 #ifdef WITH_SEASTAR
 CephContext::CephContext()
   : _conf{ceph::common::local_conf()},
@@ -195,7 +201,7 @@ public:
       _cct->_heartbeat_map->check_touch_file();
 
       // refresh the perf counters
-      _cct->refresh_perf_values();
+      _cct->_refresh_perf_values();
     }
     return NULL;
   }
@@ -603,8 +609,7 @@ CephContext::CephContext(uint32_t module_type_,
     _crypto_aes(NULL),
     _plugin_registry(NULL),
     _lockdep_obs(NULL),
-    crush_location(this),
-    _cct_perf(NULL)
+    crush_location(this)
 {
   _log = new ceph::logging::Log(&_conf->subsys);
   _log->start();
@@ -744,6 +749,9 @@ void CephContext::start_service_thread()
     _service_thread->create("service");
   }
 
+  if (!(get_init_flags() & CINIT_FLAG_NO_CCT_PERF_COUNTERS))
+    _enable_perf_counter();
+
   // make logs flush on_exit()
   if (_conf->log_flush_on_exit)
     _log->set_flush_on_exit();
@@ -780,6 +788,9 @@ void CephContext::join_service_thread()
   thread->exit_thread();
   thread->join();
   delete thread;
+
+  if (!(get_init_flags() & CINIT_FLAG_NO_CCT_PERF_COUNTERS))
+    _disable_perf_counter();
 }
 
 uint32_t CephContext::get_module_type() const
@@ -802,34 +813,28 @@ PerfCountersCollection *CephContext::get_perfcounters_collection()
   return _perf_counters_collection;
 }
 
-void CephContext::enable_perf_counter()
+void CephContext::_enable_perf_counter()
 {
+  assert(!_cct_perf);
   PerfCountersBuilder plb(this, "cct", l_cct_first, l_cct_last);
   plb.add_u64(l_cct_total_workers, "total_workers", "Total workers");
   plb.add_u64(l_cct_unhealthy_workers, "unhealthy_workers", "Unhealthy workers");
-  PerfCounters *perf_tmp = plb.create_perf_counters();
-
-  std::unique_lock<ceph::spinlock> lg(_cct_perf_lock);
-  ceph_assert(_cct_perf == NULL);
-  _cct_perf = perf_tmp;
-  lg.unlock();
-
+  _cct_perf = plb.create_perf_counters();
   _perf_counters_collection->add(_cct_perf);
 }
 
-void CephContext::disable_perf_counter()
+void CephContext::_disable_perf_counter()
 {
+  if (!_cct_perf) {
+    return;
+  }
   _perf_counters_collection->remove(_cct_perf);
-
-  std::lock_guard<ceph::spinlock> lg(_cct_perf_lock);
   delete _cct_perf;
-  _cct_perf = NULL;
+  _cct_perf = nullptr;
 }
 
-void CephContext::refresh_perf_values()
+void CephContext::_refresh_perf_values()
 {
-  std::lock_guard<ceph::spinlock> lg(_cct_perf_lock);
-
   if (_cct_perf) {
     _cct_perf->set(l_cct_total_workers, _heartbeat_map->get_total_workers());
     _cct_perf->set(l_cct_unhealthy_workers, _heartbeat_map->get_unhealthy_workers());
src/common/ceph_context.h
index e2e91bb4e182484635835d4b6148e6ace5a7d80b..3d2fd9949a27f622d5ba4041baab099e67af52ea 100644 (file)
@@ -137,22 +137,6 @@ public:
     return _heartbeat_map;
   }
 
-  /**
-   * Enable the performance counter, currently we only have counter for the
-   * number of total/unhealthy workers.
-   */
-  void enable_perf_counter();
-
-  /**
-   * Disable the performance counter.
-   */
-  void disable_perf_counter();
-
-  /**
-   * Refresh perf counter values.
-   */
-  void refresh_perf_values();
-
   /**
    * Get the admin socket associated with this CephContext.
    *
@@ -333,8 +317,22 @@ private:
     l_cct_unhealthy_workers,
     l_cct_last
   };
-  PerfCounters *_cct_perf;
-  ceph::spinlock _cct_perf_lock;
+  PerfCounters *_cct_perf = nullptr;
+
+  /**
+   * Enable the performance counters.
+   */
+  void _enable_perf_counter();
+
+  /**
+   * Disable the performance counter.
+   */
+  void _disable_perf_counter();
+
+  /**
+   * Refresh perf counter values.
+   */
+  void _refresh_perf_values();
 
   friend class CephContextObs;
 };
src/common/common_init.h
index a2c3dd84fd62b6bb80fd1423f3468c3c2a2d18b9..ee315e7643416001d436975642e0b7cc9ece18b5 100644 (file)
@@ -39,6 +39,9 @@ enum common_init_flags_t {
 
   // don't contact mons for config
   CINIT_FLAG_NO_MON_CONFIG = 0x20,
+
+  // don't expose default cct perf counters
+  CINIT_FLAG_NO_CCT_PERF_COUNTERS = 0x40,
 };
 
 #ifndef WITH_SEASTAR
src/rgw/rgw_main.cc
index 53e65609386e91ab5b3157a52048de69e7b4101e..0d44ce33504aad3c32ad06111ff144b52d2a63e2 100644 (file)
@@ -274,9 +274,6 @@ int main(int argc, const char **argv)
   init_timer.add_event_after(g_conf()->rgw_init_timeout, new C_InitTimeout);
   mutex.Unlock();
 
-  // Enable the perf counter before starting the service thread
-  g_ceph_context->enable_perf_counter();
-
   common_init_finish(g_ceph_context);
 
   init_async_signal_handler();
src/test/perf_counters.cc
index 87ce7163cdeadb61570b08ce795dd736d88a5cdb..6827197ca8edeef9f0d33d0e738def9ea5b3af1a 100644 (file)
@@ -52,7 +52,8 @@ int main(int argc, char **argv) {
   std::vector<const char*> args;
   auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY,
-                        CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
+                        CINIT_FLAG_NO_DEFAULT_CONFIG_FILE|
+                        CINIT_FLAG_NO_CCT_PERF_COUNTERS);
   common_init_finish(g_ceph_context);
   ::testing::InitGoogleTest(&argc, argv);
   return RUN_ALL_TESTS();
@@ -188,19 +189,6 @@ TEST(PerfCounters, MultiplePerfCounters) {
   ASSERT_EQ("{}", msg);
 }
 
-TEST(PerfCounters, CephContextPerfCounters) {
-  // Enable the perf counter
-  g_ceph_context->enable_perf_counter();
-  AdminSocketClient client(get_rand_socket_path());
-  std::string msg;
-
-  ASSERT_EQ("", client.do_request("{ \"prefix\": \"perf dump\", \"format\": \"json\" }", &msg));
-  ASSERT_EQ(sd("{\"cct\":{\"total_workers\":0,\"unhealthy_workers\":0}}"), msg);
-
-  // Restore to avoid impact to other test cases
-  g_ceph_context->disable_perf_counter();
-}
-
 TEST(PerfCounters, ResetPerfCounters) {
   AdminSocketClient client(get_rand_socket_path());
   std::string msg;
src/tools/rbd_mirror/main.cc
index e93db4c4f8feba82abecc98068ce831be9cef68c..38ebabba419fa7f3ae2c0149b6534ecea1811a39 100644 (file)
@@ -49,7 +49,6 @@ int main(int argc, const char **argv)
   if (g_conf()->daemonize) {
     global_init_daemonize(g_ceph_context);
   }
-  g_ceph_context->enable_perf_counter();
 
   common_init_finish(g_ceph_context);
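
With the counters registered unconditionally from start_service_thread(), any
cct's admin socket now reports the heartbeat worker counters without an
explicit enable call. A minimal sketch of checking that (an illustration, not
part of this commit), reusing the AdminSocketClient and get_rand_socket_path()
helpers from src/test/perf_counters.cc:

    AdminSocketClient client(get_rand_socket_path());
    std::string msg;
    // Ask the admin socket for a JSON perf dump, as the removed
    // CephContextPerfCounters test did.
    client.do_request("{ \"prefix\": \"perf dump\", \"format\": \"json\" }", &msg);
    // msg now includes a "cct" section, e.g.
    //   {"cct":{"total_workers":0,"unhealthy_workers":0}}
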