}
f->close_section();
} else {
- if (d->type & PERFCOUNTER_LONGRUNAVG) {
- f->open_object_section(d->name);
- pair<uint64_t,uint64_t> a = d->read_avg();
- if (d->type & PERFCOUNTER_U64) {
- f->dump_unsigned("avgcount", a.second);
- f->dump_unsigned("sum", a.first);
- } else if (d->type & PERFCOUNTER_TIME) {
- f->dump_unsigned("avgcount", a.second);
- f->dump_format_unquoted("sum", "%" PRId64 ".%09" PRId64,
- a.first / 1000000000ull,
- a.first % 1000000000ull);
- uint64_t count = a.second;
- uint64_t sum_ns = a.first;
- if (count) {
- uint64_t avg_ns = sum_ns / count;
- f->dump_format_unquoted("avgtime", "%" PRId64 ".%09" PRId64,
- avg_ns / 1000000000ull,
- avg_ns % 1000000000ull);
+ if(d->accessed) {
+ if (d->type & PERFCOUNTER_LONGRUNAVG) {
+ f->open_object_section(d->name);
+ pair<uint64_t,uint64_t> a = d->read_avg();
+ if (d->type & PERFCOUNTER_U64) {
+ f->dump_unsigned("avgcount", a.second);
+ f->dump_unsigned("sum", a.first);
+ } else if (d->type & PERFCOUNTER_TIME) {
+ f->dump_unsigned("avgcount", a.second);
+ f->dump_format_unquoted("sum", "%" PRId64 ".%09" PRId64,
+ a.first / 1000000000ull,
+ a.first % 1000000000ull);
+ uint64_t count = a.second;
+ uint64_t sum_ns = a.first;
+ if (count) {
+ uint64_t avg_ns = sum_ns / count;
+ f->dump_format_unquoted("avgtime", "%" PRId64 ".%09" PRId64,
+ avg_ns / 1000000000ull,
+ avg_ns % 1000000000ull);
+ } else {
+ // cast: "%" PRId64 expects a 64-bit value; a bare int 0 through varargs
+ // is UB on platforms where int is narrower than int64_t
+ f->dump_format_unquoted("avgtime", "%" PRId64 ".%09" PRId64, (int64_t)0, (int64_t)0);
+ }
} else {
- f->dump_format_unquoted("avgtime", "%" PRId64 ".%09" PRId64, 0, 0);
+ ceph_abort();
}
- } else {
- ceph_abort();
- }
- f->close_section();
- } else if (d->type & PERFCOUNTER_HISTOGRAM) {
- ceph_assert(d->type == (PERFCOUNTER_HISTOGRAM | PERFCOUNTER_COUNTER | PERFCOUNTER_U64));
- ceph_assert(d->histogram);
- f->open_object_section(d->name);
- d->histogram->dump_formatted(f);
- f->close_section();
- } else {
- if(d->accessed) {
+ f->close_section();
+ } else if (d->type & PERFCOUNTER_HISTOGRAM) {
+ ceph_assert(d->type == (PERFCOUNTER_HISTOGRAM | PERFCOUNTER_COUNTER | PERFCOUNTER_U64));
+ ceph_assert(d->histogram);
+ f->open_object_section(d->name);
+ d->histogram->dump_formatted(f);
+ f->close_section();
+ } else {
uint64_t v = d->u64;
if (d->type & PERFCOUNTER_U64) {
f->dump_unsigned(d->name, v);
default: tank
services:
- rgw
+- name: rgw_labeled_perfcounters_size
+ type: uint
+ level: advanced
+ desc: Maximum number of labeled perf counter instances that RGW can store
+ default: -1
+ services:
+ - rgw
#define RGW_PERFCOUNTERS_CACHE_H
#include "common/intrusive_lru.h"
-#include "common/perf_counters.h"
#include "common/labeled_perf_counters.h"
#include "common/ceph_context.h"
std::string, PerfCountersCacheEntry, item_to_key<PerfCountersCacheEntry>>> {
std::string instance_labels;
ceph::common::LabeledPerfCounters *labeled_perfcounters_instance = NULL;
- //CephContext *cct = NULL;
- //PerfCountersCollection *collection = NULL;
PerfCountersCacheEntry(std::string key) : instance_labels(key) {}
class PerfCountersCache : public PerfCountersCacheEntry::lru_t {
private:
CephContext *cct;
+ size_t curr_size = 0;
+ size_t target_size = 0;
+ ceph::common::LabeledPerfCountersBuilder *lplb;
public:
void add(std::string key) {
auto [ref, key_existed] = get_or_create(key);
if (!key_existed) {
- // perf counters instance creation code
- ceph::common::LabeledPerfCountersBuilder lplb(cct, key, l_rgw_metrics_first, l_rgw_metrics_last);
- lplb.add_u64_counter(l_rgw_metrics_req, "req", "number of reqs", NULL, 8, UNIT_NONE);
- lplb.add_u64_counter(l_rgw_metrics_failed_req, "failed_req", "Aborted Requests", NULL, 8, UNIT_NONE);
- lplb.add_u64_counter(l_rgw_metrics_put_b, "put_b", "Size of puts", NULL, 8, UNIT_NONE);
- lplb.add_u64_counter(l_rgw_metrics_get_b, "get_b", "Size of gets", NULL, 8, UNIT_NONE);
-
- ceph::common::LabeledPerfCounters *labeled_counters = lplb.create_perf_counters();
- cct->get_labeledperfcounters_collection()->add(labeled_counters);
- //cct->get_perfcounters_collection()->add(counters);
- ref->labeled_perfcounters_instance = labeled_counters;
- //ref->collection = cct->get_perfcounters_collection();
- //ref->collection->add(counters);
- //ref->cct = cct;
+ if(curr_size < target_size) {
+ // perf counters instance creation code
+ ceph::common::LabeledPerfCounters *labeled_counters = lplb->create_perf_counters();
+ cct->get_labeledperfcounters_collection()->add(labeled_counters);
+ ref->labeled_perfcounters_instance = labeled_counters;
+ // count this instance against the cap; without this increment,
+ // curr_size stays 0 and the target_size limit is never enforced
+ curr_size++;
+ }
}
}
return val;
}
- PerfCountersCache(CephContext *_cct, size_t _cache_size) {
+ PerfCountersCache(CephContext *_cct, size_t _target_size, ceph::common::LabeledPerfCountersBuilder *_lplb) {
cct = _cct;
- set_target_size(_cache_size);
+ target_size = _target_size;
+ lplb = _lplb;
+ set_target_size(_target_size);
}
- ~PerfCountersCache() {}
+ ~PerfCountersCache() {
+ delete lplb;
+ lplb = NULL;
+ }
};
#endif
labels = ceph::perf_counters::cache_key("z_rgw", {{"Bucket", s->bucket_name}, {"User", s->user->get_display_name()}});
-ldpp_dout(this, 20) << "labels for perf counters cache for l_rgw_metrics_get_b: " << labels << dendl;
+ldpp_dout(this, 20) << "labels for perf counters cache for l_rgw_get_b: " << labels << dendl;
perf_counters_cache->add(labels);
-perf_counters_cache->inc(labels, l_rgw_metrics_get_b, s->obj_size);
+// NOTE(review): the unlabeled counter below increments by (end - ofs);
+// confirm s->obj_size is the intended delta for the labeled counter
+perf_counters_cache->inc(labels, l_rgw_get_b, s->obj_size);
perfcounter->inc(l_rgw_get_b, end - ofs);
s->obj_size = ofs;
s->object->set_obj_size(ofs);
+ //uint64_t rgw_labeled_perfcounters_size = s->cct->_conf.get_val<uint64_t>("rgw_labeled_perfcounters_size");
+ //ldpp_dout(this, 20) << "rgw_labeled_perfcounters_size is: " << rgw_labeled_perfcounters_size << dendl;
std::string labels = ceph::perf_counters::cache_key("z_rgw", {{"Bucket", s->bucket_name}, {"User", s->user->get_display_name()}});
-ldpp_dout(this, 20) << "labels for perf counters cache for l_rgw_metrics_put_b: " << labels << dendl;
+ldpp_dout(this, 20) << "labels for perf counters cache for l_rgw_put_b: " << labels << dendl;
perf_counters_cache->add(labels);
-perf_counters_cache->inc(labels, l_rgw_metrics_put_b, s->obj_size);
+perf_counters_cache->inc(labels, l_rgw_put_b, s->obj_size);
perfcounter->inc(l_rgw_put_b, s->obj_size);
perfcounter = plb.create_perf_counters();
cct->get_perfcounters_collection()->add(perfcounter);
- // TODO: change target_size to be a config var
- size_t target_size = 10;
- perf_counters_cache = new PerfCountersCache(cct, target_size);
+ ceph::common::LabeledPerfCountersBuilder *lplb = new ceph::common::LabeledPerfCountersBuilder(cct, "rgw", l_rgw_first, l_rgw_last);
+
+ // RGW emits comparatively few metrics, so let's be generous
+ // and mark them all USEFUL to get transmission to ceph-mgr by default.
+ lplb->set_prio_default(PerfCountersBuilder::PRIO_USEFUL);
+ lplb->add_u64_counter(l_rgw_req, "req", "Requests");
+ lplb->add_u64_counter(l_rgw_failed_req, "failed_req", "Aborted requests");
+
+ lplb->add_u64_counter(l_rgw_get, "get", "Gets");
+ lplb->add_u64_counter(l_rgw_get_b, "get_b", "Size of gets");
+ lplb->add_time_avg(l_rgw_get_lat, "get_initial_lat", "Get latency");
+
+ lplb->add_u64_counter(l_rgw_put, "put", "Puts");
+ lplb->add_u64_counter(l_rgw_put_b, "put_b", "Size of puts");
+ lplb->add_time_avg(l_rgw_put_lat, "put_initial_lat", "Put latency");
+
+ lplb->add_u64(l_rgw_qlen, "qlen", "Queue length");
+ lplb->add_u64(l_rgw_qactive, "qactive", "Active requests queue");
+
+ lplb->add_u64_counter(l_rgw_cache_hit, "cache_hit", "Cache hits");
+ lplb->add_u64_counter(l_rgw_cache_miss, "cache_miss", "Cache miss");
+
+ lplb->add_u64_counter(l_rgw_keystone_token_cache_hit, "keystone_token_cache_hit", "Keystone token cache hits");
+ lplb->add_u64_counter(l_rgw_keystone_token_cache_miss, "keystone_token_cache_miss", "Keystone token cache miss");
+ lplb->add_u64_counter(l_rgw_gc_retire, "gc_retire_object", "GC object retires");
+
+ lplb->add_u64_counter(l_rgw_lc_expire_current, "lc_expire_current", "Lifecycle current expiration");
+ lplb->add_u64_counter(l_rgw_lc_expire_noncurrent, "lc_expire_noncurrent", "Lifecycle non-current expiration");
+ lplb->add_u64_counter(l_rgw_lc_expire_dm, "lc_expire_dm", "Lifecycle delete-marker expiration");
+ lplb->add_u64_counter(l_rgw_lc_transition_current, "lc_transition_current", "Lifecycle current transition");
+ lplb->add_u64_counter(l_rgw_lc_transition_noncurrent, "lc_transition_noncurrent", "Lifecycle non-current transition");
+ lplb->add_u64_counter(l_rgw_lc_abort_mpu, "lc_abort_mpu", "Lifecycle abort multipart upload");
+
+ lplb->add_u64_counter(l_rgw_pubsub_event_triggered, "pubsub_event_triggered", "Pubsub events with at least one topic");
+ lplb->add_u64_counter(l_rgw_pubsub_event_lost, "pubsub_event_lost", "Pubsub events lost");
+ lplb->add_u64_counter(l_rgw_pubsub_store_ok, "pubsub_store_ok", "Pubsub events successfully stored");
+ lplb->add_u64_counter(l_rgw_pubsub_store_fail, "pubsub_store_fail", "Pubsub events failed to be stored");
+ lplb->add_u64(l_rgw_pubsub_events, "pubsub_events", "Pubsub events in store");
+ lplb->add_u64_counter(l_rgw_pubsub_push_ok, "pubsub_push_ok", "Pubsub events pushed to an endpoint");
+ lplb->add_u64_counter(l_rgw_pubsub_push_failed, "pubsub_push_failed", "Pubsub events failed to be pushed to an endpoint");
+ lplb->add_u64(l_rgw_pubsub_push_pending, "pubsub_push_pending", "Pubsub events pending reply from endpoint");
+ lplb->add_u64_counter(l_rgw_pubsub_missing_conf, "pubsub_missing_conf", "Pubsub events could not be handled because of missing configuration");
+
+ lplb->add_u64(l_rgw_lua_current_vms, "lua_current_vms", "Number of Lua VMs currently being executed");
+ lplb->add_u64_counter(l_rgw_lua_script_ok, "lua_script_ok", "Successfull executions of lua scripts");
+ lplb->add_u64_counter(l_rgw_lua_script_fail, "lua_script_fail", "Failed executions of lua scripts");
+
+ uint64_t target_size = cct->_conf.get_val<uint64_t>("rgw_labeled_perfcounters_size");
+ perf_counters_cache = new PerfCountersCache(cct, target_size, lplb);
+ lplb = NULL;
return 0;
}
#pragma once
#include "include/common_fwd.h"
#include "common/perf_counters_cache.h"
+#include "common/labeled_perf_counters.h"
extern PerfCounters *perfcounter;
extern PerfCountersCache *perf_counters_cache;