},
sm::description("total number of io depth")
),
+ sm::make_counter(
+ "record_group_padding_bytes",
+ [this] {
+ return record_submitter.get_record_group_padding_bytes();
+ },
+ sm::description("bytes of metadata padding when writing record groups")
+ ),
+ sm::make_counter(
+ "record_group_metadata_bytes",
+ [this] {
+ return record_submitter.get_record_group_metadata_bytes();
+ },
+ sm::description("bytes of raw metadata when writing record groups")
+ ),
+ sm::make_counter(
+ "record_group_data_bytes",
+ [this] {
+ return record_submitter.get_record_group_data_bytes();
+ },
+ sm::description("bytes of data when writing record groups")
+ ),
}
);
}
});
}
-ceph::bufferlist Journal::RecordBatch::encode_batch(
+std::pair<ceph::bufferlist, record_group_size_t>
+Journal::RecordBatch::encode_batch(
const journal_seq_t& committed_to,
segment_nonce_t segment_nonce)
{
state = state_t::SUBMITTING;
submitting_size = pending.get_size();
- submitting_length = pending.size.get_encoded_length();
- submitting_mdlength = pending.size.get_mdlength();
+ // Snapshot the record_group_size_t before encode_records() clears
+ // pending, so it can be returned to the caller for stats accounting.
+ auto gsize = pending.size;
+ submitting_length = gsize.get_encoded_length();
+ submitting_mdlength = gsize.get_mdlength();
auto bl = encode_records(pending, committed_to, segment_nonce);
// Note: pending is cleared here
assert(bl.length() == (std::size_t)submitting_length);
- return bl;
+ // Move bl into the pair to avoid copying the bufferlist on return.
+ return std::make_pair(std::move(bl), gsize);
}
void Journal::RecordBatch::set_result(
pop_free_batch();
increment_io();
- ceph::bufferlist to_write = p_batch->encode_batch(
+ auto [to_write, sizes] = p_batch->encode_batch(
journal_segment_manager.get_committed_to(),
journal_segment_manager.get_nonce());
+ account_submission(sizes);
std::ignore = journal_segment_manager.write(to_write
).safe_then([this, p_batch](auto write_result) {
finish_submit_batch(p_batch, write_result);
bool flush)
{
assert(!p_current_batch->is_submitting());
- record_batch_stats.increment(
+ stats.record_batch_stats.increment(
p_current_batch->get_num_records() + 1);
auto write_fut = [this, flush, record=std::move(record)]() mutable {
if (flush && p_current_batch->is_empty()) {
journal_segment_manager.get_block_size(),
journal_segment_manager.get_committed_to(),
journal_segment_manager.get_nonce());
+ account_submission(sizes);
return journal_segment_manager.write(to_write
).safe_then([mdlength = sizes.get_mdlength()](auto write_result) {
return record_locator_t{
extent_len_t block_size);
// Encode the batched records for write.
- ceph::bufferlist encode_batch(
+ std::pair<ceph::bufferlist, record_group_size_t> encode_batch(
const journal_seq_t& committed_to,
segment_nonce_t segment_nonce);
JournalSegmentManager&);
grouped_io_stats get_record_batch_stats() const {
- return record_batch_stats;
+ return stats.record_batch_stats;
}
grouped_io_stats get_io_depth_stats() const {
- return io_depth_stats;
+ return stats.io_depth_stats;
+ }
+
+ // Cumulative bytes of block-alignment padding added to record-group
+ // metadata; exposed for the "record_group_padding_bytes" counter.
+ uint64_t get_record_group_padding_bytes() const {
+ return stats.record_group_padding_bytes;
+ }
+
+ // Cumulative bytes of raw (unpadded) record-group metadata; exposed
+ // for the "record_group_metadata_bytes" counter.
+ uint64_t get_record_group_metadata_bytes() const {
+ return stats.record_group_metadata_bytes;
+ }
+
+ // Cumulative bytes of record-group data; exposed for the
+ // "record_group_data_bytes" counter.
+ uint64_t get_record_group_data_bytes() const {
+ return stats.record_group_data_bytes;
}
void reset_stats() {
- record_batch_stats = {};
- io_depth_stats = {};
+ // Zero every counter at once via the aggregated stats struct.
+ stats = {};
}
void set_write_pipeline(WritePipeline *_write_pipeline) {
void increment_io() {
++num_outstanding_io;
- io_depth_stats.increment(num_outstanding_io);
+ stats.io_depth_stats.increment(num_outstanding_io);
update_state();
}
free_batch_ptrs.pop_front();
}
+ // Accumulate per-submission record-group byte counters:
+ // - padding: block-aligned metadata length minus raw metadata length
+ // - metadata: raw (unpadded) metadata bytes
+ // - data: record data bytes
+ void account_submission(const record_group_size_t& size) {
+ stats.record_group_padding_bytes +=
+ (size.get_mdlength() - size.get_raw_mdlength());
+ stats.record_group_metadata_bytes += size.get_raw_mdlength();
+ stats.record_group_data_bytes += size.dlength;
+ }
+
using maybe_result_t = RecordBatch::maybe_result_t;
void finish_submit_batch(RecordBatch*, maybe_result_t);
seastar::circular_buffer<RecordBatch*> free_batch_ptrs;
std::optional<seastar::promise<> > wait_submit_promise;
- grouped_io_stats record_batch_stats;
- grouped_io_stats io_depth_stats;
+ // Aggregated submitter statistics; cleared together by reset_stats().
+ struct {
+ grouped_io_stats record_batch_stats;
+ grouped_io_stats io_depth_stats;
+ // Cumulative byte counters updated by account_submission().
+ uint64_t record_group_padding_bytes = 0;
+ uint64_t record_group_metadata_bytes = 0;
+ uint64_t record_group_data_bytes = 0;
+ } stats;
};
SegmentProvider* segment_provider = nullptr;