// logger
PerfCountersBuilder plb(cct, "client", l_c_first, l_c_last);
- plb.add_fl_avg(l_c_reply, "reply");
- plb.add_fl_avg(l_c_lat, "lat");
- plb.add_fl_avg(l_c_wrlat, "wrlat");
- plb.add_fl_avg(l_c_owrlat, "owrlat");
- plb.add_fl_avg(l_c_ordlat, "ordlat");
+ plb.add_time_avg(l_c_reply, "reply");
+ plb.add_time_avg(l_c_lat, "lat");
+ plb.add_time_avg(l_c_wrlat, "wrlat");
+ plb.add_time_avg(l_c_owrlat, "owrlat");
+ plb.add_time_avg(l_c_ordlat, "ordlat");
logger = plb.create_perf_counters();
cct->get_perfcounters_collection()->add(logger);
utime_t lat = ceph_clock_now(cct);
lat -= request->sent_stamp;
ldout(cct, 20) << "lat " << lat << dendl;
- logger->finc(l_c_lat,(double)lat);
- logger->finc(l_c_reply,(double)lat);
+ logger->tinc(l_c_lat, lat);
+ logger->tinc(l_c_reply, lat);
request->put();
// time
utime_t lat = ceph_clock_now(cct);
lat -= start;
- logger->finc(l_c_wrlat,(double)lat);
+ logger->tinc(l_c_wrlat, lat);
// assume success for now. FIXME.
uint64_t totalwritten = size;
lat -= start;
if (client->logger) {
if (write)
- client->logger->fset(l_c_owrlat, lat);
+ client->logger->tset(l_c_owrlat, lat);
else
- client->logger->fset(l_c_ordlat, lat);
+ client->logger->tset(l_c_ordlat, lat);
}
}
finisher_thread(this) {
PerfCountersBuilder b(cct, string("finisher-") + name,
l_finisher_first, l_finisher_last);
- b.add_fl_avg(l_finisher_queue_len, "queue_len");
+ b.add_u64(l_finisher_queue_len, "queue_len");
logger = b.create_perf_counters();
cct->get_perfcounters_collection()->add(logger);
logger->set(l_finisher_queue_len, 0);
if (cct) {
PerfCountersBuilder b(cct, string("mutex-") + name,
l_mutex_first, l_mutex_last);
- b.add_fl_avg(l_mutex_wait, "wait");
+ b.add_time_avg(l_mutex_wait, "wait");
logger = b.create_perf_counters();
cct->get_perfcounters_collection()->add(logger);
logger->set(l_mutex_wait, 0);
start = ceph_clock_now(cct);
int r = pthread_mutex_lock(&_m);
if (logger && cct && cct->_conf->mutex_perf_counter)
- logger->finc(l_mutex_wait,
+ logger->tinc(l_mutex_wait,
ceph_clock_now(cct) - start);
assert(r == 0);
if (lockdep && g_lockdep) _locked();
b.add_u64_counter(l_throttle_take_sum, "take_sum");
b.add_u64_counter(l_throttle_put, "put");
b.add_u64_counter(l_throttle_put_sum, "put_sum");
- b.add_fl_avg(l_throttle_wait, "wait");
+ b.add_time_avg(l_throttle_wait, "wait");
logger = b.create_perf_counters();
cct->get_perfcounters_collection()->add(logger);
if (waited) {
ldout(cct, 3) << "_wait finished waiting" << dendl;
utime_t dur = ceph_clock_now(cct) - start;
- logger->finc(l_throttle_wait, dur);
+ logger->tinc(l_throttle_wait, dur);
}
delete cv;
perf_counter_data_any_d& data(m_data[idx - m_lower_bound - 1]);
if (!(data.type & PERFCOUNTER_U64))
return;
- data.u.u64 += amt;
+ data.u64 += amt;
if (data.type & PERFCOUNTER_LONGRUNAVG)
data.avgcount++;
}
assert(!(data.type & PERFCOUNTER_LONGRUNAVG));
if (!(data.type & PERFCOUNTER_U64))
return;
- assert(data.u.u64 >= amt);
- data.u.u64 -= amt;
+ assert(data.u64 >= amt);
+ data.u64 -= amt;
}
void PerfCounters::set(int idx, uint64_t amt)
perf_counter_data_any_d& data(m_data[idx - m_lower_bound - 1]);
if (!(data.type & PERFCOUNTER_U64))
return;
- data.u.u64 = amt;
+ data.u64 = amt;
if (data.type & PERFCOUNTER_LONGRUNAVG)
data.avgcount++;
}
const perf_counter_data_any_d& data(m_data[idx - m_lower_bound - 1]);
if (!(data.type & PERFCOUNTER_U64))
return 0;
- return data.u.u64;
+ return data.u64;
}
-void PerfCounters::finc(int idx, double amt)
+void PerfCounters::tinc(int idx, utime_t amt)
{
if (!m_cct->_conf->perf)
return;
assert(idx > m_lower_bound);
assert(idx < m_upper_bound);
perf_counter_data_any_d& data(m_data[idx - m_lower_bound - 1]);
- if (!(data.type & PERFCOUNTER_FLOAT))
+ if (!(data.type & PERFCOUNTER_TIME))
return;
- data.u.dbl += amt;
+ data.u64 += amt.to_nsec();
if (data.type & PERFCOUNTER_LONGRUNAVG)
data.avgcount++;
}
-void PerfCounters::fset(int idx, double amt)
+void PerfCounters::tset(int idx, utime_t amt)
{
if (!m_cct->_conf->perf)
return;
assert(idx > m_lower_bound);
assert(idx < m_upper_bound);
perf_counter_data_any_d& data(m_data[idx - m_lower_bound - 1]);
- if (!(data.type & PERFCOUNTER_FLOAT))
+ if (!(data.type & PERFCOUNTER_TIME))
return;
- data.u.dbl = amt;
+ data.u64 = amt.to_nsec();
if (data.type & PERFCOUNTER_LONGRUNAVG)
assert(0);
}
-double PerfCounters::fget(int idx) const
+utime_t PerfCounters::tget(int idx) const
{
if (!m_cct->_conf->perf)
- return 0.0;
+ return utime_t();
Mutex::Locker lck(m_lock);
assert(idx > m_lower_bound);
assert(idx < m_upper_bound);
const perf_counter_data_any_d& data(m_data[idx - m_lower_bound - 1]);
- if (!(data.type & PERFCOUNTER_FLOAT))
- return 0.0;
- return data.u.dbl;
+ if (!(data.type & PERFCOUNTER_TIME))
+ return utime_t();
+ return utime_t(data.u64 / 1000000000ull, data.u64 % 1000000000ull);
}
void PerfCounters::write_json_to_buf(bufferlist& bl, bool schema)
PerfCounters::perf_counter_data_any_d::perf_counter_data_any_d()
: name(NULL),
type(PERFCOUNTER_NONE),
+ u64(0),
avgcount(0)
{
- memset(&u, 0, sizeof(u));
}
void PerfCounters::perf_counter_data_any_d::write_schema_json(char *buf, size_t buf_sz) const
if (type & PERFCOUNTER_U64) {
snprintf(buf, buf_sz, "\"%s\":{\"avgcount\":%" PRId64 ","
"\"sum\":%" PRId64 "}",
- name, avgcount, u.u64);
+ name, avgcount, u64);
}
- else if (type & PERFCOUNTER_FLOAT) {
+ else if (type & PERFCOUNTER_TIME) {
snprintf(buf, buf_sz, "\"%s\":{\"avgcount\":%" PRId64 ","
- "\"sum\":%g}",
- name, avgcount, u.dbl);
+ "\"sum\":%" PRIu64 ".%09" PRIu64 "}",
+ name, avgcount, u64 / 1000000000ull, u64 % 1000000000ull);
}
else {
assert(0);
else {
if (type & PERFCOUNTER_U64) {
snprintf(buf, buf_sz, "\"%s\":%" PRId64,
- name, u.u64);
+ name, u64);
}
- else if (type & PERFCOUNTER_FLOAT) {
- snprintf(buf, buf_sz, "\"%s\":%g", name, u.dbl);
+ else if (type & PERFCOUNTER_TIME) {
+ snprintf(buf, buf_sz, "\"%s\":%" PRIu64 ".%09" PRIu64, name, u64 / 1000000000ull, u64 % 1000000000ull);
}
else {
assert(0);
add_impl(idx, name, PERFCOUNTER_U64 | PERFCOUNTER_LONGRUNAVG);
}
-void PerfCountersBuilder::add_fl(int idx, const char *name)
+void PerfCountersBuilder::add_time(int idx, const char *name)
{
- add_impl(idx, name, PERFCOUNTER_FLOAT);
+ add_impl(idx, name, PERFCOUNTER_TIME);
}
-void PerfCountersBuilder::add_fl_avg(int idx, const char *name)
+void PerfCountersBuilder::add_time_avg(int idx, const char *name)
{
- add_impl(idx, name, PERFCOUNTER_FLOAT | PERFCOUNTER_LONGRUNAVG);
+ add_impl(idx, name, PERFCOUNTER_TIME | PERFCOUNTER_LONGRUNAVG);
}
void PerfCountersBuilder::add_impl(int idx, const char *name, int ty)
#include "common/config_obs.h"
#include "common/Mutex.h"
#include "include/buffer.h"
+#include "include/utime.h"
#include <stdint.h>
#include <string>
enum perfcounter_type_d
{
PERFCOUNTER_NONE = 0,
- PERFCOUNTER_FLOAT = 0x1,
+ PERFCOUNTER_TIME = 0x1,
PERFCOUNTER_U64 = 0x2,
PERFCOUNTER_LONGRUNAVG = 0x4,
PERFCOUNTER_COUNTER = 0x8,
* and accessed. For a counter, use the inc(counter, amount) function (note
* that amount defaults to 1 if you don't set it). For a value, use the
* set(index, value) function.
- * (For floats, use the finc and fset variants.)
+ * (For time, use the tinc and tset variants.)
*
* If for some reason you would like to reset your counters, you can do so using
* the set functions even if they are counters, and you can also
* increment your values if for some reason you wish to.
*
- * For the floating-point average, it returns the current value and
+ * For the time average, it returns the current value and
* the "avgcount" member when read off. avgcount is incremented when you call
- * finc. Calling fset on an average is an error and will assert out.
+ * tinc. Calling tset on an average is an error and will assert out.
*/
class PerfCounters
{
void set(int idx, uint64_t v);
uint64_t get(int idx) const;
- void fset(int idx, double v);
- void finc(int idx, double v);
- double fget(int idx) const;
+ void tset(int idx, utime_t v);
+ void tinc(int idx, utime_t v);
+ utime_t tget(int idx) const;
void write_json_to_buf(ceph::bufferlist& bl, bool schema);
const char *name;
enum perfcounter_type_d type;
- union {
- uint64_t u64;
- double dbl;
- } u;
+ uint64_t u64;
uint64_t avgcount;
};
typedef std::vector<perf_counter_data_any_d> perf_counter_data_vec_t;
void add_u64(int key, const char *name);
void add_u64_counter(int key, const char *name);
void add_u64_avg(int key, const char *name);
- void add_fl(int key, const char *name);
- void add_fl_avg(int key, const char *name);
+ void add_time(int key, const char *name);
+ void add_time_avg(int key, const char *name);
PerfCounters* create_perf_counters();
private:
PerfCountersBuilder(const PerfCountersBuilder &rhs);
__u32& sec_ref() { return tv.tv_sec; }
__u32& nsec_ref() { return tv.tv_nsec; }
+ uint64_t to_nsec() const {
+ return (uint64_t)tv.tv_nsec + (uint64_t)tv.tv_sec * 1000000000ull;
+ }
+
void copy_to_timeval(struct timeval *v) const {
v->tv_sec = tv.tv_sec;
v->tv_usec = tv.tv_nsec/1000;
}
switch (aio_type) {
case AIO_TYPE_READ:
- ictx->perfcounter->finc(l_librbd_aio_rd_latency, elapsed); break;
+ ictx->perfcounter->tinc(l_librbd_aio_rd_latency, elapsed); break;
case AIO_TYPE_WRITE:
- ictx->perfcounter->finc(l_librbd_aio_wr_latency, elapsed); break;
+ ictx->perfcounter->tinc(l_librbd_aio_wr_latency, elapsed); break;
case AIO_TYPE_DISCARD:
- ictx->perfcounter->finc(l_librbd_aio_discard_latency, elapsed); break;
+ ictx->perfcounter->tinc(l_librbd_aio_discard_latency, elapsed); break;
default:
lderr(ictx->cct) << "completed invalid aio_type: " << aio_type << dendl;
break;
plb.add_u64_counter(l_librbd_rd, "rd");
plb.add_u64_counter(l_librbd_rd_bytes, "rd_bytes");
- plb.add_fl_avg(l_librbd_rd_latency, "rd_latency");
+ plb.add_time_avg(l_librbd_rd_latency, "rd_latency");
plb.add_u64_counter(l_librbd_wr, "wr");
plb.add_u64_counter(l_librbd_wr_bytes, "wr_bytes");
- plb.add_fl_avg(l_librbd_wr_latency, "wr_latency");
+ plb.add_time_avg(l_librbd_wr_latency, "wr_latency");
plb.add_u64_counter(l_librbd_discard, "discard");
plb.add_u64_counter(l_librbd_discard_bytes, "discard_bytes");
- plb.add_fl_avg(l_librbd_discard_latency, "discard_latency");
+ plb.add_time_avg(l_librbd_discard_latency, "discard_latency");
plb.add_u64_counter(l_librbd_flush, "flush");
plb.add_u64_counter(l_librbd_aio_rd, "aio_rd");
plb.add_u64_counter(l_librbd_aio_rd_bytes, "aio_rd_bytes");
- plb.add_fl_avg(l_librbd_aio_rd_latency, "aio_rd_latency");
+ plb.add_time_avg(l_librbd_aio_rd_latency, "aio_rd_latency");
plb.add_u64_counter(l_librbd_aio_wr, "aio_wr");
plb.add_u64_counter(l_librbd_aio_wr_bytes, "aio_wr_bytes");
- plb.add_fl_avg(l_librbd_aio_wr_latency, "aio_wr_latency");
+ plb.add_time_avg(l_librbd_aio_wr_latency, "aio_wr_latency");
plb.add_u64_counter(l_librbd_aio_discard, "aio_discard");
plb.add_u64_counter(l_librbd_aio_discard_bytes, "aio_discard_bytes");
- plb.add_fl_avg(l_librbd_aio_discard_latency, "aio_discard_latency");
+ plb.add_time_avg(l_librbd_aio_discard_latency, "aio_discard_latency");
plb.add_u64_counter(l_librbd_snap_create, "snap_create");
plb.add_u64_counter(l_librbd_snap_remove, "snap_remove");
plb.add_u64_counter(l_librbd_snap_rollback, "snap_rollback");
}
elapsed = ceph_clock_now(ictx->cct) - start_time;
- ictx->perfcounter->finc(l_librbd_rd_latency, elapsed);
+ ictx->perfcounter->tinc(l_librbd_rd_latency, elapsed);
ictx->perfcounter->inc(l_librbd_rd);
ictx->perfcounter->inc(l_librbd_rd_bytes, mylen);
return total_read;
return ret;
elapsed = ceph_clock_now(ictx->cct) - start_time;
- ictx->perfcounter->finc(l_librbd_wr_latency, elapsed);
+ ictx->perfcounter->tinc(l_librbd_wr_latency, elapsed);
ictx->perfcounter->inc(l_librbd_wr);
ictx->perfcounter->inc(l_librbd_wr_bytes, mylen);
return mylen;
mds_plb.add_u64_counter(l_mds_req, "req"); // FIXME: nobody is actually setting this
mds_plb.add_u64_counter(l_mds_reply, "reply");
- mds_plb.add_fl_avg(l_mds_replyl, "replyl");
+ mds_plb.add_time_avg(l_mds_replyl, "replyl");
mds_plb.add_u64_counter(l_mds_fw, "fw");
mds_plb.add_u64_counter(l_mds_dir_f, "dir_f");
mds_plb.add_u64_counter(l_mds_trino, "trino");
mds_plb.add_u64_counter(l_mds_tlock, "tlock");
- mds_plb.add_fl(l_mds_l, "l");
+ mds_plb.add_u64(l_mds_l, "l");
mds_plb.add_u64(l_mds_q, "q");
mds_plb.add_u64(l_mds_popanyd, "popanyd"); // FIXME: unused
mds_plb.add_u64(l_mds_popnest, "popnest");
if (logger) {
req_rate = logger->get(l_mds_req);
- logger->fset(l_mds_l, (int)load.mds_load());
+ logger->set(l_mds_l, 100 * load.mds_load());
logger->set(l_mds_q, messenger->get_dispatch_queue_len());
logger->set(l_mds_sm, mdcache->num_subtrees());
mdr->did_early_reply = true;
mds->logger->inc(l_mds_reply);
- double lat = ceph_clock_now(g_ceph_context) - mdr->client_request->get_recv_stamp();
- mds->logger->finc(l_mds_replyl, lat);
+ utime_t lat = ceph_clock_now(g_ceph_context) - mdr->client_request->get_recv_stamp();
+ mds->logger->tinc(l_mds_replyl, lat);
dout(20) << "lat " << lat << dendl;
}
if (!did_early_reply && !is_replay) {
mds->logger->inc(l_mds_reply);
- double lat = ceph_clock_now(g_ceph_context) - mdr->client_request->get_recv_stamp();
- mds->logger->finc(l_mds_replyl, lat);
+ utime_t lat = ceph_clock_now(g_ceph_context) - mdr->client_request->get_recv_stamp();
+ mds->logger->tinc(l_mds_replyl, lat);
dout(20) << "lat " << lat << dendl;
if (tracei)
<< " " << next.finish
<< " lat " << lat << dendl;
if (logger) {
- logger->finc(l_os_j_lat, lat);
+ logger->tinc(l_os_j_lat, lat);
}
if (next.finish)
finisher->queue(next.finish);
plb.add_u64(l_os_jq_max_bytes, "journal_queue_max_bytes");
plb.add_u64(l_os_jq_bytes, "journal_queue_bytes");
plb.add_u64_counter(l_os_j_bytes, "journal_bytes");
- plb.add_fl_avg(l_os_j_lat, "journal_latency");
+ plb.add_time_avg(l_os_j_lat, "journal_latency");
plb.add_u64_counter(l_os_j_wr, "journal_wr");
plb.add_u64_avg(l_os_j_wr_bytes, "journal_wr_bytes");
plb.add_u64(l_os_oq_max_ops, "op_queue_max_ops");
plb.add_u64(l_os_oq_max_bytes, "op_queue_max_bytes");
plb.add_u64(l_os_oq_bytes, "op_queue_bytes");
plb.add_u64_counter(l_os_bytes, "bytes");
- plb.add_fl_avg(l_os_apply_lat, "apply_latency");
+ plb.add_time_avg(l_os_apply_lat, "apply_latency");
plb.add_u64(l_os_committing, "committing");
plb.add_u64_counter(l_os_commit, "commitcycle");
- plb.add_fl_avg(l_os_commit_len, "commitcycle_interval");
- plb.add_fl_avg(l_os_commit_lat, "commitcycle_latency");
+ plb.add_time_avg(l_os_commit_len, "commitcycle_interval");
+ plb.add_time_avg(l_os_commit_lat, "commitcycle_latency");
plb.add_u64_counter(l_os_j_full, "journal_full");
logger = plb.create_perf_counters();
utime_t lat = ceph_clock_now(g_ceph_context);
lat -= o->start;
- logger->finc(l_os_apply_lat, lat);
+ logger->tinc(l_os_apply_lat, lat);
if (o->onreadable_sync) {
o->onreadable_sync->finish(0);
dout(10) << "sync_entry commit took " << lat << ", interval was " << dur << dendl;
logger->inc(l_os_commit);
- logger->finc(l_os_commit_lat, lat);
- logger->finc(l_os_commit_len, dur);
+ logger->tinc(l_os_commit_lat, lat);
+ logger->tinc(l_os_commit_len, dur);
apply_manager.commit_finish();
osd_plb.add_u64_counter(l_osd_op, "op"); // client ops
osd_plb.add_u64_counter(l_osd_op_inb, "op_in_bytes"); // client op in bytes (writes)
osd_plb.add_u64_counter(l_osd_op_outb, "op_out_bytes"); // client op out bytes (reads)
- osd_plb.add_fl_avg(l_osd_op_lat, "op_latency"); // client op latency
+ osd_plb.add_time_avg(l_osd_op_lat, "op_latency"); // client op latency
osd_plb.add_u64_counter(l_osd_op_r, "op_r"); // client reads
osd_plb.add_u64_counter(l_osd_op_r_outb, "op_r_out_bytes"); // client read out bytes
- osd_plb.add_fl_avg(l_osd_op_r_lat, "op_r_latency"); // client read latency
+ osd_plb.add_time_avg(l_osd_op_r_lat, "op_r_latency"); // client read latency
osd_plb.add_u64_counter(l_osd_op_w, "op_w"); // client writes
osd_plb.add_u64_counter(l_osd_op_w_inb, "op_w_in_bytes"); // client write in bytes
- osd_plb.add_fl_avg(l_osd_op_w_rlat, "op_w_rlat"); // client write readable/applied latency
- osd_plb.add_fl_avg(l_osd_op_w_lat, "op_w_latency"); // client write latency
+ osd_plb.add_time_avg(l_osd_op_w_rlat, "op_w_rlat"); // client write readable/applied latency
+ osd_plb.add_time_avg(l_osd_op_w_lat, "op_w_latency"); // client write latency
osd_plb.add_u64_counter(l_osd_op_rw, "op_rw"); // client rmw
osd_plb.add_u64_counter(l_osd_op_rw_inb, "op_rw_in_bytes"); // client rmw in bytes
osd_plb.add_u64_counter(l_osd_op_rw_outb,"op_rw_out_bytes"); // client rmw out bytes
- osd_plb.add_fl_avg(l_osd_op_rw_rlat,"op_rw_rlat"); // client rmw readable/applied latency
- osd_plb.add_fl_avg(l_osd_op_rw_lat, "op_rw_latency"); // client rmw latency
+ osd_plb.add_time_avg(l_osd_op_rw_rlat,"op_rw_rlat"); // client rmw readable/applied latency
+ osd_plb.add_time_avg(l_osd_op_rw_lat, "op_rw_latency"); // client rmw latency
osd_plb.add_u64_counter(l_osd_sop, "subop"); // subops
osd_plb.add_u64_counter(l_osd_sop_inb, "subop_in_bytes"); // subop in bytes
- osd_plb.add_fl_avg(l_osd_sop_lat, "subop_latency"); // subop latency
+ osd_plb.add_time_avg(l_osd_sop_lat, "subop_latency"); // subop latency
osd_plb.add_u64_counter(l_osd_sop_w, "subop_w"); // replicated (client) writes
osd_plb.add_u64_counter(l_osd_sop_w_inb, "subop_w_in_bytes"); // replicated write in bytes
- osd_plb.add_fl_avg(l_osd_sop_w_lat, "subop_w_latency"); // replicated write latency
+ osd_plb.add_time_avg(l_osd_sop_w_lat, "subop_w_latency"); // replicated write latency
osd_plb.add_u64_counter(l_osd_sop_pull, "subop_pull"); // pull request
- osd_plb.add_fl_avg(l_osd_sop_pull_lat, "subop_pull_latency");
+ osd_plb.add_time_avg(l_osd_sop_pull_lat, "subop_pull_latency");
osd_plb.add_u64_counter(l_osd_sop_push, "subop_push"); // push (write)
osd_plb.add_u64_counter(l_osd_sop_push_inb, "subop_push_in_bytes");
- osd_plb.add_fl_avg(l_osd_sop_push_lat, "subop_push_latency");
+ osd_plb.add_time_avg(l_osd_sop_push_lat, "subop_push_latency");
osd_plb.add_u64_counter(l_osd_pull, "pull"); // pull requests sent
osd_plb.add_u64_counter(l_osd_push, "push"); // push messages
osd_plb.add_u64_counter(l_osd_rop, "recovery_ops"); // recovery ops (started)
- osd_plb.add_fl(l_osd_loadavg, "loadavg");
+ osd_plb.add_u64(l_osd_loadavg, "loadavg");
osd_plb.add_u64(l_osd_buf, "buffer_bytes"); // total ceph::buffer bytes
osd_plb.add_u64(l_osd_pg, "numpg"); // num pgs
// get CPU load avg
double loadavgs[1];
if (getloadavg(loadavgs, 1) == 1)
- logger->fset(l_osd_loadavg, loadavgs[0]);
+ logger->set(l_osd_loadavg, 100 * loadavgs[0]);
dout(30) << "heartbeat checking stats" << dendl;
osd->logger->inc(l_osd_op_outb, outb);
osd->logger->inc(l_osd_op_inb, inb);
- osd->logger->finc(l_osd_op_lat, latency);
+ osd->logger->tinc(l_osd_op_lat, latency);
if (m->may_read() && m->may_write()) {
osd->logger->inc(l_osd_op_rw);
osd->logger->inc(l_osd_op_rw_inb, inb);
osd->logger->inc(l_osd_op_rw_outb, outb);
- osd->logger->finc(l_osd_op_rw_rlat, rlatency);
- osd->logger->finc(l_osd_op_rw_lat, latency);
+ osd->logger->tinc(l_osd_op_rw_rlat, rlatency);
+ osd->logger->tinc(l_osd_op_rw_lat, latency);
} else if (m->may_read()) {
osd->logger->inc(l_osd_op_r);
osd->logger->inc(l_osd_op_r_outb, outb);
- osd->logger->finc(l_osd_op_r_lat, latency);
+ osd->logger->tinc(l_osd_op_r_lat, latency);
} else if (m->may_write()) {
osd->logger->inc(l_osd_op_w);
osd->logger->inc(l_osd_op_w_inb, inb);
- osd->logger->finc(l_osd_op_w_rlat, rlatency);
- osd->logger->finc(l_osd_op_w_lat, latency);
+ osd->logger->tinc(l_osd_op_w_rlat, rlatency);
+ osd->logger->tinc(l_osd_op_w_lat, latency);
} else
assert(0);
osd->logger->inc(l_osd_sop);
osd->logger->inc(l_osd_sop_inb, inb);
- osd->logger->finc(l_osd_sop_lat, latency);
+ osd->logger->tinc(l_osd_sop_lat, latency);
if (tag_inb)
osd->logger->inc(tag_inb, inb);
- osd->logger->finc(tag_lat, latency);
+ osd->logger->tinc(tag_lat, latency);
dout(15) << "log_subop_stats " << *op->request << " inb " << inb << " latency " << latency << dendl;
}
if (logger) {
utime_t lat = ceph_clock_now(cct);
lat -= stamp;
- logger->finc(logger_key_lat, lat);
+ logger->tinc(logger_key_lat, lat);
}
// adjust safe_pos
"data_overwritten_while_flushing");
plb.add_u64_counter(l_objectcacher_write_ops_blocked, "write_ops_blocked");
plb.add_u64_counter(l_objectcacher_write_bytes_blocked, "write_bytes_blocked");
- plb.add_fl(l_objectcacher_write_time_blocked, "write_time_blocked");
+ plb.add_time(l_objectcacher_write_time_blocked, "write_time_blocked");
perfcounter = plb.create_perf_counters();
cct->get_perfcounters_collection()->add(perfcounter);
perfcounter->inc(l_objectcacher_write_ops_blocked);
perfcounter->inc(l_objectcacher_write_bytes_blocked, len);
utime_t blocked = ceph_clock_now(cct) - start;
- perfcounter->finc(l_objectcacher_write_time_blocked, (double) blocked);
+ perfcounter->tinc(l_objectcacher_write_time_blocked, blocked);
}
return ret;
}
plb.add_u64_counter(l_rgw_get, "get");
plb.add_u64_counter(l_rgw_get_b, "get_b");
- plb.add_fl_avg(l_rgw_get_lat, "get_initial_lat");
+ plb.add_time_avg(l_rgw_get_lat, "get_initial_lat");
plb.add_u64_counter(l_rgw_put, "put");
plb.add_u64_counter(l_rgw_put_b, "put_b");
- plb.add_fl_avg(l_rgw_put_lat, "put_initial_lat");
+ plb.add_time_avg(l_rgw_put_lat, "put_initial_lat");
plb.add_u64(l_rgw_qlen, "qlen");
plb.add_u64(l_rgw_qactive, "qactive");
cur_ofs += len;
ofs += len;
ret = 0;
- perfcounter->finc(l_rgw_get_lat,
+ perfcounter->tinc(l_rgw_get_lat,
(ceph_clock_now(s->cct) - start_time));
send_response_data(bl);
found_end = true;
}
- perfcounter->finc(l_rgw_get_lat,
+ perfcounter->tinc(l_rgw_get_lat,
(ceph_clock_now(s->cct) - start_time));
if (found_start) {
ofs += len;
ret = 0;
- perfcounter->finc(l_rgw_get_lat,
+ perfcounter->tinc(l_rgw_get_lat,
(ceph_clock_now(s->cct) - start_time));
ret = send_response_data(bl);
bl.clear();
ret = processor->complete(etag, attrs);
done:
dispose_processor(processor);
- perfcounter->finc(l_rgw_put_lat,
+ perfcounter->tinc(l_rgw_put_lat,
(ceph_clock_now(s->cct) - s->time));
}
PerfCountersBuilder bld(cct, "test_perfcounter_1",
TEST_PERFCOUNTERS1_ELEMENT_FIRST, TEST_PERFCOUNTERS1_ELEMENT_LAST);
bld.add_u64(TEST_PERFCOUNTERS1_ELEMENT_1, "element1");
- bld.add_fl(TEST_PERFCOUNTERS1_ELEMENT_2, "element2");
- bld.add_fl_avg(TEST_PERFCOUNTERS1_ELEMENT_3, "element3");
+ bld.add_time(TEST_PERFCOUNTERS1_ELEMENT_2, "element2");
+ bld.add_time_avg(TEST_PERFCOUNTERS1_ELEMENT_3, "element3");
return bld.create_perf_counters();
}
std::string msg;
ASSERT_EQ("", client.do_request("perfcounters_dump", &msg));
ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':0,"
- "'element2':0,'element3':{'avgcount':0,'sum':0}}}"), msg);
+ "'element2':0.000000000,'element3':{'avgcount':0,'sum':0.000000000}}}"), msg);
fake_pf->inc(TEST_PERFCOUNTERS1_ELEMENT_1);
- fake_pf->fset(TEST_PERFCOUNTERS1_ELEMENT_2, 0.5);
- fake_pf->finc(TEST_PERFCOUNTERS1_ELEMENT_3, 100.0);
+ fake_pf->tset(TEST_PERFCOUNTERS1_ELEMENT_2, utime_t(0, 500000000));
+ fake_pf->tinc(TEST_PERFCOUNTERS1_ELEMENT_3, utime_t(100, 0));
ASSERT_EQ("", client.do_request("perfcounters_dump", &msg));
ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':1,"
- "'element2':0.5,'element3':{'avgcount':1,'sum':100}}}"), msg);
- fake_pf->finc(TEST_PERFCOUNTERS1_ELEMENT_3, 0.0);
- fake_pf->finc(TEST_PERFCOUNTERS1_ELEMENT_3, 25.0);
+ "'element2':0.500000000,'element3':{'avgcount':1,'sum':100.000000000}}}"), msg);
+ fake_pf->tinc(TEST_PERFCOUNTERS1_ELEMENT_3, utime_t());
+ fake_pf->tinc(TEST_PERFCOUNTERS1_ELEMENT_3, utime_t(25,0));
ASSERT_EQ("", client.do_request("perfcounters_dump", &msg));
- ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':1,'element2':0.5,"
- "'element3':{'avgcount':3,'sum':125}}}"), msg);
+ ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':1,'element2':0.500000000,"
+ "'element3':{'avgcount':3,'sum':125.000000000}}}"), msg);
}
enum {
PerfCountersBuilder bld(cct, "test_perfcounter_2",
TEST_PERFCOUNTERS2_ELEMENT_FIRST, TEST_PERFCOUNTERS2_ELEMENT_LAST);
bld.add_u64(TEST_PERFCOUNTERS2_ELEMENT_FOO, "foo");
- bld.add_fl(TEST_PERFCOUNTERS2_ELEMENT_BAR, "bar");
+ bld.add_time(TEST_PERFCOUNTERS2_ELEMENT_BAR, "bar");
return bld.create_perf_counters();
}
std::string msg;
ASSERT_EQ("", client.do_request("perfcounters_dump", &msg));
- ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':0,'element2':0,'element3':"
- "{'avgcount':0,'sum':0}},'test_perfcounter_2':{'foo':0,'bar':0}}"), msg);
+ ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':0,'element2':0.000000000,'element3':"
+ "{'avgcount':0,'sum':0.000000000}},'test_perfcounter_2':{'foo':0,'bar':0.000000000}}"), msg);
fake_pf1->inc(TEST_PERFCOUNTERS1_ELEMENT_1);
fake_pf1->inc(TEST_PERFCOUNTERS1_ELEMENT_1, 5);
ASSERT_EQ("", client.do_request("perfcounters_dump", &msg));
- ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':6,'element2':0,'element3':"
- "{'avgcount':0,'sum':0}},'test_perfcounter_2':{'foo':0,'bar':0}}"), msg);
+ ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':6,'element2':0.000000000,'element3':"
+ "{'avgcount':0,'sum':0.000000000}},'test_perfcounter_2':{'foo':0,'bar':0.000000000}}"), msg);
coll->remove(fake_pf2);
ASSERT_EQ("", client.do_request("perfcounters_dump", &msg));
- ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':6,'element2':0,"
- "'element3':{'avgcount':0,'sum':0}}}"), msg);
+ ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':6,'element2':0.000000000,"
+ "'element3':{'avgcount':0,'sum':0.000000000}}}"), msg);
ASSERT_EQ("", client.do_request("perfcounters_schema", &msg));
ASSERT_EQ(sd("{'test_perfcounter_1':{'element1':{'type':2},"
"'element2':{'type':1},'element3':{'type':5}}}"), msg);