rgw and rbd_mirror were enabling a perfcounter monitoring the heartbeatmap.
Enable this unconditionally for all ccts. This is simpler and more
useful!
Add a CINIT_FLAG to avoid starting them up. This is only used by the unit
tests, which do not expect the full set of cct counters and would
otherwise break every time we add a new cct counter. That also means
removing the now-redundant CephContextPerfCounters test.
Signed-off-by: Sage Weil <sage@redhat.com>
using ceph::bufferlist;
using ceph::HeartbeatMap;
+// for CINIT_FLAGS
+#include "common/common_init.h"
+
+#include <iostream>
+#include <pthread.h>
+
#ifdef WITH_SEASTAR
CephContext::CephContext()
: _conf{ceph::common::local_conf()},
_cct->_heartbeat_map->check_touch_file();
// refresh the perf counters
- _cct->refresh_perf_values();
+ _cct->_refresh_perf_values();
}
return NULL;
}
_crypto_aes(NULL),
_plugin_registry(NULL),
_lockdep_obs(NULL),
- crush_location(this),
- _cct_perf(NULL)
+ crush_location(this)
{
_log = new ceph::logging::Log(&_conf->subsys);
_log->start();
_service_thread->create("service");
}
+ if (!(get_init_flags() & CINIT_FLAG_NO_CCT_PERF_COUNTERS))
+ _enable_perf_counter();
+
// make logs flush on_exit()
if (_conf->log_flush_on_exit)
_log->set_flush_on_exit();
thread->exit_thread();
thread->join();
delete thread;
+
+ if (!(get_init_flags() & CINIT_FLAG_NO_CCT_PERF_COUNTERS))
+ _disable_perf_counter();
}
uint32_t CephContext::get_module_type() const
return _perf_counters_collection;
}
-void CephContext::enable_perf_counter()
+void CephContext::_enable_perf_counter()
{
+ assert(!_cct_perf);
PerfCountersBuilder plb(this, "cct", l_cct_first, l_cct_last);
plb.add_u64(l_cct_total_workers, "total_workers", "Total workers");
plb.add_u64(l_cct_unhealthy_workers, "unhealthy_workers", "Unhealthy workers");
- PerfCounters *perf_tmp = plb.create_perf_counters();
-
- std::unique_lock<ceph::spinlock> lg(_cct_perf_lock);
- ceph_assert(_cct_perf == NULL);
- _cct_perf = perf_tmp;
- lg.unlock();
-
+ _cct_perf = plb.create_perf_counters();
_perf_counters_collection->add(_cct_perf);
}
-void CephContext::disable_perf_counter()
+void CephContext::_disable_perf_counter()
{
+ if (!_cct_perf) {
+ return;
+ }
_perf_counters_collection->remove(_cct_perf);
-
- std::lock_guard<ceph::spinlock> lg(_cct_perf_lock);
delete _cct_perf;
- _cct_perf = NULL;
+ _cct_perf = nullptr;
}
-void CephContext::refresh_perf_values()
+void CephContext::_refresh_perf_values()
{
- std::lock_guard<ceph::spinlock> lg(_cct_perf_lock);
-
if (_cct_perf) {
_cct_perf->set(l_cct_total_workers, _heartbeat_map->get_total_workers());
_cct_perf->set(l_cct_unhealthy_workers, _heartbeat_map->get_unhealthy_workers());
return _heartbeat_map;
}
- /**
- * Enable the performance counter, currently we only have counter for the
- * number of total/unhealthy workers.
- */
- void enable_perf_counter();
-
- /**
- * Disable the performance counter.
- */
- void disable_perf_counter();
-
- /**
- * Refresh perf counter values.
- */
- void refresh_perf_values();
-
/**
* Get the admin socket associated with this CephContext.
*
l_cct_unhealthy_workers,
l_cct_last
};
- PerfCounters *_cct_perf;
- ceph::spinlock _cct_perf_lock;
+ PerfCounters *_cct_perf = nullptr;
+
+ /**
+ * Enable the performance counters.
+ */
+ void _enable_perf_counter();
+
+  /**
+   * Disable the performance counters.
+   */
+  void _disable_perf_counter();
+
+ /**
+ * Refresh perf counter values.
+ */
+ void _refresh_perf_values();
friend class CephContextObs;
};
// don't contact mons for config
CINIT_FLAG_NO_MON_CONFIG = 0x20,
+
+ // don't expose default cct perf counters
+ CINIT_FLAG_NO_CCT_PERF_COUNTERS = 0x40,
};
#ifndef WITH_SEASTAR
init_timer.add_event_after(g_conf()->rgw_init_timeout, new C_InitTimeout);
mutex.Unlock();
- // Enable the perf counter before starting the service thread
- g_ceph_context->enable_perf_counter();
-
common_init_finish(g_ceph_context);
init_async_signal_handler();
std::vector<const char*> args;
auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
- CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
+ CINIT_FLAG_NO_DEFAULT_CONFIG_FILE|
+ CINIT_FLAG_NO_CCT_PERF_COUNTERS);
common_init_finish(g_ceph_context);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
ASSERT_EQ("{}", msg);
}
-TEST(PerfCounters, CephContextPerfCounters) {
- // Enable the perf counter
- g_ceph_context->enable_perf_counter();
- AdminSocketClient client(get_rand_socket_path());
- std::string msg;
-
- ASSERT_EQ("", client.do_request("{ \"prefix\": \"perf dump\", \"format\": \"json\" }", &msg));
- ASSERT_EQ(sd("{\"cct\":{\"total_workers\":0,\"unhealthy_workers\":0}}"), msg);
-
- // Restore to avoid impact to other test cases
- g_ceph_context->disable_perf_counter();
-}
-
TEST(PerfCounters, ResetPerfCounters) {
AdminSocketClient client(get_rand_socket_path());
std::string msg;
if (g_conf()->daemonize) {
global_init_daemonize(g_ceph_context);
}
- g_ceph_context->enable_perf_counter();
common_init_finish(g_ceph_context);