"Average decompress latency");
b.add_time_avg(l_bluestore_csum_lat, "csum_lat",
"Average checksum latency");
- b.add_u64(l_bluestore_compress_success_count, "compress_success_count",
+ b.add_u64_counter(l_bluestore_compress_success_count, "compress_success_count",
"Sum for beneficial compress ops");
- b.add_u64(l_bluestore_compress_rejected_count, "compress_rejected_count",
+ b.add_u64_counter(l_bluestore_compress_rejected_count, "compress_rejected_count",
"Sum for compress ops rejected due to low net gain of space");
- b.add_u64(l_bluestore_write_pad_bytes, "write_pad_bytes",
+ b.add_u64_counter(l_bluestore_write_pad_bytes, "write_pad_bytes",
"Sum for write-op padded bytes");
- b.add_u64(l_bluestore_deferred_write_ops, "deferred_write_ops",
- "Sum for deferred write op");
- b.add_u64(l_bluestore_deferred_write_bytes, "deferred_write_bytes",
- "Sum for deferred write bytes", "def");
- b.add_u64(l_bluestore_write_penalty_read_ops, "write_penalty_read_ops",
- "Sum for write penalty read ops");
+ b.add_u64_counter(l_bluestore_deferred_write_ops, "deferred_write_ops",
+ "Sum for deferred write op");
+ b.add_u64_counter(l_bluestore_deferred_write_bytes, "deferred_write_bytes",
+ "Sum for deferred write bytes", "def");
+ b.add_u64_counter(l_bluestore_write_penalty_read_ops, "write_penalty_read_ops",
+ "Sum for write penalty read ops");
b.add_u64(l_bluestore_allocated, "bluestore_allocated",
"Sum for allocated bytes");
b.add_u64(l_bluestore_stored, "bluestore_stored",
"Sum for stored bytes");
b.add_u64(l_bluestore_onodes, "bluestore_onodes",
"Number of onodes in cache");
- b.add_u64(l_bluestore_onode_hits, "bluestore_onode_hits",
- "Sum for onode-lookups hit in the cache");
- b.add_u64(l_bluestore_onode_misses, "bluestore_onode_misses",
- "Sum for onode-lookups missed in the cache");
- b.add_u64(l_bluestore_onode_shard_hits, "bluestore_onode_shard_hits",
- "Sum for onode-shard lookups hit in the cache");
- b.add_u64(l_bluestore_onode_shard_misses, "bluestore_onode_shard_misses",
- "Sum for onode-shard lookups missed in the cache");
+ b.add_u64_counter(l_bluestore_onode_hits, "bluestore_onode_hits",
+ "Sum for onode-lookups hit in the cache");
+ b.add_u64_counter(l_bluestore_onode_misses, "bluestore_onode_misses",
+ "Sum for onode-lookups missed in the cache");
+ b.add_u64_counter(l_bluestore_onode_shard_hits, "bluestore_onode_shard_hits",
+ "Sum for onode-shard lookups hit in the cache");
+ b.add_u64_counter(l_bluestore_onode_shard_misses,
+ "bluestore_onode_shard_misses",
+ "Sum for onode-shard lookups missed in the cache");
b.add_u64(l_bluestore_extents, "bluestore_extents",
"Number of extents in cache");
b.add_u64(l_bluestore_blobs, "bluestore_blobs",
"Number of blobs in cache");
b.add_u64(l_bluestore_buffer_miss_bytes, "bluestore_buffer_miss_bytes",
"Sum for bytes of read missed in the cache");
- b.add_u64(l_bluestore_write_big, "bluestore_write_big",
- "Large min_alloc_size-aligned writes into fresh blobs");
- b.add_u64(l_bluestore_write_big_bytes, "bluestore_write_big_bytes",
- "Large min_alloc_size-aligned writes into fresh blobs (bytes)");
- b.add_u64(l_bluestore_write_big_blobs, "bluestore_write_big_blobs",
- "Large min_alloc_size-aligned writes into fresh blobs (blobs)");
- b.add_u64(l_bluestore_write_small, "bluestore_write_small",
- "Small writes into existing or sparse small blobs");
- b.add_u64(l_bluestore_write_small_bytes, "bluestore_write_small_bytes",
- "Small writes into existing or sparse small blobs (bytes)");
- b.add_u64(l_bluestore_write_small_unused, "bluestore_write_small_unused",
- "Small writes into unused portion of existing blob");
- b.add_u64(l_bluestore_write_small_deferred, "bluestore_write_small_deferred",
- "Small overwrites using deferred");
- b.add_u64(l_bluestore_write_small_pre_read, "bluestore_write_small_pre_read",
- "Small writes that required we read some data (possibly cached) to "
- "fill out the block");
- b.add_u64(l_bluestore_write_small_new, "bluestore_write_small_new",
- "Small write into new (sparse) blob");
-
- b.add_u64(l_bluestore_txc, "bluestore_txc", "Transactions committed");
- b.add_u64(l_bluestore_onode_reshard, "bluestore_onode_reshard",
- "Onode extent map reshard events");
- b.add_u64(l_bluestore_blob_split, "bluestore_blob_split",
- "Sum for blob splitting due to resharding");
- b.add_u64(l_bluestore_extent_compress, "bluestore_extent_compress",
- "Sum for extents that have been removed due to compression");
- b.add_u64(l_bluestore_gc_merged, "bluestore_gc_merged",
- "Sum for extents that have been merged due to garbage collection");
+ b.add_u64_counter(l_bluestore_write_big, "bluestore_write_big",
+ "Large aligned writes into fresh blobs");
+ b.add_u64_counter(l_bluestore_write_big_bytes, "bluestore_write_big_bytes",
+ "Large aligned writes into fresh blobs (bytes)");
+ b.add_u64_counter(l_bluestore_write_big_blobs, "bluestore_write_big_blobs",
+ "Large aligned writes into fresh blobs (blobs)");
+ b.add_u64_counter(l_bluestore_write_small, "bluestore_write_small",
+ "Small writes into existing or sparse small blobs");
+ b.add_u64_counter(l_bluestore_write_small_bytes, "bluestore_write_small_bytes",
+ "Small writes into existing or sparse small blobs (bytes)");
+ b.add_u64_counter(l_bluestore_write_small_unused,
+ "bluestore_write_small_unused",
+ "Small writes into unused portion of existing blob");
+ b.add_u64_counter(l_bluestore_write_small_deferred,
+ "bluestore_write_small_deferred",
+ "Small overwrites using deferred");
+ b.add_u64_counter(l_bluestore_write_small_pre_read,
+ "bluestore_write_small_pre_read",
+ "Small writes that required we read some data (possibly "
+ "cached) to fill out the block");
+ b.add_u64_counter(l_bluestore_write_small_new, "bluestore_write_small_new",
+ "Small write into new (sparse) blob");
+
+ b.add_u64_counter(l_bluestore_txc, "bluestore_txc", "Transactions committed");
+ b.add_u64_counter(l_bluestore_onode_reshard, "bluestore_onode_reshard",
+ "Onode extent map reshard events");
+ b.add_u64_counter(l_bluestore_blob_split, "bluestore_blob_split",
+ "Sum for blob splitting due to resharding");
+ b.add_u64_counter(l_bluestore_extent_compress, "bluestore_extent_compress",
+ "Sum for extents that have been removed due to compression");
+ b.add_u64_counter(l_bluestore_gc_merged, "bluestore_gc_merged",
+ "Sum for extents that have been merged due to garbage "
+ "collection");
logger = b.create_perf_counters();
cct->get_perfcounters_collection()->add(logger);
}
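
Context on the distinction this patch relies on (not part of the diff): `add_u64()` registers a plain gauge-style value, while `add_u64_counter()` registers a monotonically increasing counter, so consumers of the perf counter schema (e.g. metric exporters) can tell current levels apart from accumulating sums. Below is a minimal sketch of the usage pattern against Ceph's PerfCountersBuilder API; the enum values, the "test_perf" subsystem name, and the helper function are hypothetical illustrations, not code from the patch.

```cpp
#include "common/ceph_context.h"
#include "common/perf_counters.h"

// Hypothetical counter indices for the sketch.
enum {
  l_test_first = 1000,
  l_test_onodes,      // gauge: current number of cached onodes
  l_test_write_ops,   // counter: only ever accumulates
  l_test_last,
};

PerfCounters *build_test_counters(CephContext *cct)
{
  PerfCountersBuilder b(cct, "test_perf", l_test_first, l_test_last);

  // Gauge-style value: reported as a current level, updated with set()/dec().
  b.add_u64(l_test_onodes, "onodes", "Number of onodes in cache");

  // Counter-style value: reported as a monotonically increasing sum, updated
  // with inc(); this is the type the patch switches the "Sum for ..." metrics to.
  b.add_u64_counter(l_test_write_ops, "write_ops", "Sum for write ops");

  PerfCounters *logger = b.create_perf_counters();
  cct->get_perfcounters_collection()->add(logger);
  return logger;
}

// Usage:
//   logger->set(l_test_onodes, current_onode_count);  // gauge
//   logger->inc(l_test_write_ops);                     // counter
```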