#include "common/labeled_perf_counters.h"
#include "common/ceph_context.h"
+// Counter ids for the labeled RGW perf counters. Builders are created with
+// the half-open sentinel range (lm_rgw_first, lm_rgw_last); real counter ids
+// live strictly between the two sentinels.
+// NOTE(review): 16000 presumably keeps this range clear of the existing
+// unlabeled l_rgw_* id range -- TODO confirm against rgw_perf_counters.h.
+enum {
+ lm_rgw_first = 16000,
+ lm_rgw_put_b,
+ lm_rgw_last,
+};
+
+
class PerfCountersCache {
private:
CephContext *cct;
size_t curr_size = 0;
size_t target_size = 0;
- ceph::common::LabeledPerfCountersBuilder *default_lplb;
+ // id range (sentinels) handed to every per-key counters builder
+ int lower_bound = 0;
+ int upper_bound = 0;
+ // caller-supplied hook that registers its counters on a fresh builder
+ std::function<void(ceph::common::LabeledPerfCountersBuilder*)> lpcb_init;
+
std::unordered_map<std::string, ceph::common::LabeledPerfCounters*> cache;
public:
return NULL;
}
- ceph::common::LabeledPerfCounters* add(std::string key, ceph::common::LabeledPerfCountersBuilder *lplb = NULL) {
+ // Look up the labeled counters for `key`, lazily creating, registering and
+ // caching a new instance on first use (bounded by target_size entries).
+ ceph::common::LabeledPerfCounters* add(std::string key) {
//void add(std::string key) {
auto labeled_counters = get(key);
if (!labeled_counters) {
+ // perf counters instance creation code
if(curr_size < target_size) {
- // perf counters instance creation code
- if(lplb) {
- labeled_counters = lplb->create_perf_counters();
- } else {
- labeled_counters = default_lplb->create_perf_counters();
- }
+ // Stack-allocated builder: RAII guarantees cleanup even if lpcb_init()
+ // or create_perf_counters() throws (no raw new/delete, no leak).
+ ceph::common::LabeledPerfCountersBuilder lpcb(cct, key, lower_bound, upper_bound);
+ lpcb_init(&lpcb);
+
+ labeled_counters = lpcb.create_perf_counters();
labeled_counters->set_name(key);
+
+ // add new labeled counters to collection, cache
cct->get_labeledperfcounters_collection()->add(labeled_counters);
cache[key] = labeled_counters;
curr_size++;
}
}
+ // NOTE(review): add() should return labeled_counters on all paths --
+ // the visible braces show no return; verify the elided context.
}
}
+ // _cct: context owning the labeled collection; _target_size: max cached
+ // instances; (_lower_bound, _upper_bound): sentinel id range for builders;
+ // _lpcb_init: counter-registration hook invoked on each fresh builder.
- PerfCountersCache(CephContext *_cct, size_t _target_size, ceph::common::LabeledPerfCountersBuilder *_lplb) : cct(_cct), target_size(_target_size), default_lplb(_lplb) {}
+ PerfCountersCache(CephContext *_cct, size_t _target_size, int _lower_bound, int _upper_bound,
+ std::function<void(ceph::common::LabeledPerfCountersBuilder*)> _lpcb_init) : cct(_cct),
+ target_size(_target_size), lower_bound(_lower_bound), upper_bound(_upper_bound),
+ lpcb_init(_lpcb_init) {}
- ~PerfCountersCache() {
- delete default_lplb;
- default_lplb = NULL;
- }
+ // cached counters are registered with (and presumably owned by) the cct
+ // collection -- nothing to free here. TODO confirm ownership semantics.
+ ~PerfCountersCache() {}
};
perfcounter = plb.create_perf_counters();
cct->get_perfcounters_collection()->add(perfcounter);
-
- ceph::common::LabeledPerfCountersBuilder *lplb = new ceph::common::LabeledPerfCountersBuilder(cct, "labeled_rgw", l_rgw_first, l_rgw_last);
-
- // RGW emits comparatively few metrics, so let's be generous
- // and mark them all USEFUL to get transmission to ceph-mgr by default.
- lplb->set_prio_default(PerfCountersBuilder::PRIO_USEFUL);
- lplb->add_u64_counter(l_rgw_req, "req", "Requests");
- lplb->add_u64_counter(l_rgw_failed_req, "failed_req", "Aborted requests");
-
- lplb->add_u64_counter(l_rgw_get, "get", "Gets");
- lplb->add_u64_counter(l_rgw_get_b, "get_b", "Size of gets");
- lplb->add_time_avg(l_rgw_get_lat, "get_initial_lat", "Get latency");
-
- lplb->add_u64_counter(l_rgw_put, "put", "Puts");
- lplb->add_u64_counter(l_rgw_put_b, "put_b", "Size of puts");
- lplb->add_time_avg(l_rgw_put_lat, "put_initial_lat", "Put latency");
-
- lplb->add_u64(l_rgw_qlen, "qlen", "Queue length");
- lplb->add_u64(l_rgw_qactive, "qactive", "Active requests queue");
-
- lplb->add_u64_counter(l_rgw_cache_hit, "cache_hit", "Cache hits");
- lplb->add_u64_counter(l_rgw_cache_miss, "cache_miss", "Cache miss");
-
- lplb->add_u64_counter(l_rgw_keystone_token_cache_hit, "keystone_token_cache_hit", "Keystone token cache hits");
- lplb->add_u64_counter(l_rgw_keystone_token_cache_miss, "keystone_token_cache_miss", "Keystone token cache miss");
- lplb->add_u64_counter(l_rgw_gc_retire, "gc_retire_object", "GC object retires");
-
- lplb->add_u64_counter(l_rgw_lc_expire_current, "lc_expire_current", "Lifecycle current expiration");
- lplb->add_u64_counter(l_rgw_lc_expire_noncurrent, "lc_expire_noncurrent", "Lifecycle non-current expiration");
- lplb->add_u64_counter(l_rgw_lc_expire_dm, "lc_expire_dm", "Lifecycle delete-marker expiration");
- lplb->add_u64_counter(l_rgw_lc_transition_current, "lc_transition_current", "Lifecycle current transition");
- lplb->add_u64_counter(l_rgw_lc_transition_noncurrent, "lc_transition_noncurrent", "Lifecycle non-current transition");
- lplb->add_u64_counter(l_rgw_lc_abort_mpu, "lc_abort_mpu", "Lifecycle abort multipart upload");
-
- lplb->add_u64_counter(l_rgw_pubsub_event_triggered, "pubsub_event_triggered", "Pubsub events with at least one topic");
- lplb->add_u64_counter(l_rgw_pubsub_event_lost, "pubsub_event_lost", "Pubsub events lost");
- lplb->add_u64_counter(l_rgw_pubsub_store_ok, "pubsub_store_ok", "Pubsub events successfully stored");
- lplb->add_u64_counter(l_rgw_pubsub_store_fail, "pubsub_store_fail", "Pubsub events failed to be stored");
- lplb->add_u64(l_rgw_pubsub_events, "pubsub_events", "Pubsub events in store");
- lplb->add_u64_counter(l_rgw_pubsub_push_ok, "pubsub_push_ok", "Pubsub events pushed to an endpoint");
- lplb->add_u64_counter(l_rgw_pubsub_push_failed, "pubsub_push_failed", "Pubsub events failed to be pushed to an endpoint");
- lplb->add_u64(l_rgw_pubsub_push_pending, "pubsub_push_pending", "Pubsub events pending reply from endpoint");
- lplb->add_u64_counter(l_rgw_pubsub_missing_conf, "pubsub_missing_conf", "Pubsub events could not be handled because of missing configuration");
-
- lplb->add_u64(l_rgw_lua_current_vms, "lua_current_vms", "Number of Lua VMs currently being executed");
- lplb->add_u64_counter(l_rgw_lua_script_ok, "lua_script_ok", "Successfull executions of lua scripts");
- lplb->add_u64_counter(l_rgw_lua_script_fail, "lua_script_fail", "Failed executions of lua scripts");
+ // Counters are no longer registered eagerly on a single shared builder;
+ // the cache builds them lazily per key via this registration callback.
+ std::function<void(ceph::common::LabeledPerfCountersBuilder*)> lpcb_init = add_rgw_counters;
uint64_t target_size = cct->_conf.get_val<uint64_t>("rgw_labeled_perfcounters_size");
- perf_counters_cache = new PerfCountersCache(cct, target_size, lplb);
- lplb = NULL;
+ perf_counters_cache = new PerfCountersCache(cct, target_size, lm_rgw_first, lm_rgw_last, lpcb_init);
return 0;
}
+// Registers the RGW labeled counters on a freshly created per-key builder.
+// Installed as the PerfCountersCache init callback in rgw_perf_start().
+// NOTE(review): this function is referenced before this definition in the
+// same file -- confirm a forward declaration exists in the elided context,
+// otherwise this will not compile.
+void add_rgw_counters(ceph::common::LabeledPerfCountersBuilder *lpcb) {
+ // only lm_rgw_put_b is defined in the new lm_* enum so far
+ lpcb->add_u64_counter(lm_rgw_put_b, "put_b", "Size of puts");
+ // Legacy l_rgw_* registrations below -- presumably kept as a migration
+ // checklist until the lm_* enum covers them all; TODO confirm and remove.
+ /*
+ lpcb.add_u64_counter(l_rgw_req, "req", "Requests");
+ lpcb.add_u64_counter(l_rgw_failed_req, "failed_req", "Aborted requests");
+
+ lpcb.add_u64_counter(l_rgw_get, "get", "Gets");
+ lpcb.add_u64_counter(l_rgw_get_b, "get_b", "Size of gets");
+ lpcb.add_time_avg(l_rgw_get_lat, "get_initial_lat", "Get latency");
+
+ lpcb.add_u64_counter(l_rgw_put, "put", "Puts");
+ lpcb.add_u64_counter(l_rgw_put_b, "put_b", "Size of puts");
+ lpcb.add_time_avg(l_rgw_put_lat, "put_initial_lat", "Put latency");
+
+ lpcb.add_u64(l_rgw_qlen, "qlen", "Queue length");
+ lpcb.add_u64(l_rgw_qactive, "qactive", "Active requests queue");
+
+ lpcb.add_u64_counter(l_rgw_cache_hit, "cache_hit", "Cache hits");
+ lpcb.add_u64_counter(l_rgw_cache_miss, "cache_miss", "Cache miss");
+
+ lpcb.add_u64_counter(l_rgw_keystone_token_cache_hit, "keystone_token_cache_hit", "Keystone token cache hits");
+ lpcb.add_u64_counter(l_rgw_keystone_token_cache_miss, "keystone_token_cache_miss", "Keystone token cache miss");
+ lpcb.add_u64_counter(l_rgw_gc_retire, "gc_retire_object", "GC object retires");
+
+ lpcb.add_u64_counter(l_rgw_lc_expire_current, "lc_expire_current", "Lifecycle current expiration");
+ lpcb.add_u64_counter(l_rgw_lc_expire_noncurrent, "lc_expire_noncurrent", "Lifecycle non-current expiration");
+ lpcb.add_u64_counter(l_rgw_lc_expire_dm, "lc_expire_dm", "Lifecycle delete-marker expiration");
+ lpcb.add_u64_counter(l_rgw_lc_transition_current, "lc_transition_current", "Lifecycle current transition");
+ lpcb.add_u64_counter(l_rgw_lc_transition_noncurrent, "lc_transition_noncurrent", "Lifecycle non-current transition");
+ lpcb.add_u64_counter(l_rgw_lc_abort_mpu, "lc_abort_mpu", "Lifecycle abort multipart upload");
+
+ lpcb.add_u64_counter(l_rgw_pubsub_event_triggered, "pubsub_event_triggered", "Pubsub events with at least one topic");
+ lpcb.add_u64_counter(l_rgw_pubsub_event_lost, "pubsub_event_lost", "Pubsub events lost");
+ lpcb.add_u64_counter(l_rgw_pubsub_store_ok, "pubsub_store_ok", "Pubsub events successfully stored");
+ lpcb.add_u64_counter(l_rgw_pubsub_store_fail, "pubsub_store_fail", "Pubsub events failed to be stored");
+ lpcb.add_u64(l_rgw_pubsub_events, "pubsub_events", "Pubsub events in store");
+ lpcb.add_u64_counter(l_rgw_pubsub_push_ok, "pubsub_push_ok", "Pubsub events pushed to an endpoint");
+ lpcb.add_u64_counter(l_rgw_pubsub_push_failed, "pubsub_push_failed", "Pubsub events failed to be pushed to an endpoint");
+ lpcb.add_u64(l_rgw_pubsub_push_pending, "pubsub_push_pending", "Pubsub events pending reply from endpoint");
+ lpcb.add_u64_counter(l_rgw_pubsub_missing_conf, "pubsub_missing_conf", "Pubsub events could not be handled because of missing configuration");
+
+ lpcb.add_u64(l_rgw_lua_current_vms, "lua_current_vms", "Number of Lua VMs currently being executed");
+ lpcb.add_u64_counter(l_rgw_lua_script_ok, "lua_script_ok", "Successfull executions of lua scripts");
+ lpcb.add_u64_counter(l_rgw_lua_script_fail, "lua_script_fail", "Failed executions of lua scripts");
+ */
+}
+
+
void rgw_perf_stop(CephContext *cct)
{
ceph_assert(perfcounter);