}
JournalTrimmerImpl::JournalTrimmerImpl(
+ unsigned int store_index,
BackrefManager &backref_manager,
config_t config,
backend_type_t type,
config.validate();
ceph_assert(roll_start >= 0);
ceph_assert(roll_size > 0);
- register_metrics();
+ register_metrics(store_index);
}
void JournalTrimmerImpl::set_journal_head(journal_seq_t head)
});
}
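+ // Note: a small helper along these lines (hypothetical, not part of this
+ // change) could avoid repeating the label construction in every
+ // register_metrics() below:
+ //
+ //   static seastar::metrics::label_instance
+ //   make_store_label(unsigned int store_index) {
+ //     return seastar::metrics::label_instance(
+ //         "shard_store_index", std::to_string(store_index));
+ //   }
+ //
+ // each make_counter()/make_gauge() call would then pass
+ // {make_store_label(store_index)} instead of spelling the label out.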
-void JournalTrimmerImpl::register_metrics()
+void JournalTrimmerImpl::register_metrics(unsigned int store_index)
{
namespace sm = seastar::metrics;
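+ // every journal_trimmer metric below carries this store's index as a
+ // "shard_store_index" label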
metrics.add_group("journal_trimmer", {
sm::make_counter("dirty_journal_bytes",
[this] { return get_dirty_journal_size(); },
- sm::description("the size of the journal for dirty extents")),
+ sm::description("the size of the journal for dirty extents"),
+ {sm::label_instance("shard_store_index",
+ std::to_string(store_index))}),
sm::make_counter("alloc_journal_bytes",
[this] { return get_alloc_journal_size(); },
- sm::description("the size of the journal for alloc info"))
+ sm::description("the size of the journal for alloc info"),
+ {sm::label_instance("shard_store_index",
+ std::to_string(store_index))}),
});
}
}
SegmentCleaner::SegmentCleaner(
+ unsigned int store_index,
config_t config,
SegmentManagerGroupRef&& sm_group,
BackrefManager &backref_manager,
rewrite_gen_t max_rewrite_generation,
bool detailed,
bool is_cold)
- : detailed(detailed),
+ : store_index(store_index),
+ detailed(detailed),
is_cold(is_cold),
config(config),
sm_group(std::move(sm_group)),
metrics.add_group(prefix, {
sm::make_counter("segments_number",
[this] { return segments.get_num_segments(); },
- sm::description("the number of segments")),
+ sm::description("the number of segments"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("segment_size",
[this] { return segments.get_segment_size(); },
- sm::description("the bytes of a segment")),
+ sm::description("the bytes of a segment"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("segments_in_journal",
[this] { return get_segments_in_journal(); },
- sm::description("the number of segments in journal")),
+ sm::description("the number of segments in journal"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("segments_type_journal",
[this] { return segments.get_num_type_journal(); },
- sm::description("the number of segments typed journal")),
+ sm::description("the number of segments typed journal"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("segments_type_ool",
[this] { return segments.get_num_type_ool(); },
- sm::description("the number of segments typed out-of-line")),
+ sm::description("the number of segments typed out-of-line"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("segments_open",
[this] { return segments.get_num_open(); },
- sm::description("the number of open segments")),
+ sm::description("the number of open segments"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("segments_empty",
[this] { return segments.get_num_empty(); },
- sm::description("the number of empty segments")),
+ sm::description("the number of empty segments"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("segments_closed",
[this] { return segments.get_num_closed(); },
- sm::description("the number of closed segments")),
+ sm::description("the number of closed segments"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("segments_count_open_journal",
[this] { return segments.get_count_open_journal(); },
- sm::description("the count of open journal segment operations")),
+ sm::description("the count of open journal segment operations"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("segments_count_open_ool",
[this] { return segments.get_count_open_ool(); },
- sm::description("the count of open ool segment operations")),
+ sm::description("the count of open ool segment operations"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("segments_count_release_journal",
[this] { return segments.get_count_release_journal(); },
- sm::description("the count of release journal segment operations")),
+ sm::description("the count of release journal segment operations"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("segments_count_release_ool",
[this] { return segments.get_count_release_ool(); },
- sm::description("the count of release ool segment operations")),
+ sm::description("the count of release ool segment operations"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("segments_count_close_journal",
[this] { return segments.get_count_close_journal(); },
- sm::description("the count of close journal segment operations")),
+ sm::description("the count of close journal segment operations"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("segments_count_close_ool",
[this] { return segments.get_count_close_ool(); },
- sm::description("the count of close ool segment operations")),
+ sm::description("the count of close ool segment operations"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("total_bytes",
[this] { return segments.get_total_bytes(); },
- sm::description("the size of the space")),
+ sm::description("the size of the space"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("available_bytes",
[this] { return segments.get_available_bytes(); },
- sm::description("the size of the space is available")),
+ sm::description("the size of the space is available"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("unavailable_unreclaimable_bytes",
[this] { return get_unavailable_unreclaimable_bytes(); },
- sm::description("the size of the space is unavailable and unreclaimable")),
+ sm::description("the size of the space is unavailable and unreclaimable"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("unavailable_reclaimable_bytes",
[this] { return get_unavailable_reclaimable_bytes(); },
- sm::description("the size of the space is unavailable and reclaimable")),
+ sm::description("the size of the space is unavailable and reclaimable"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("used_bytes", stats.used_bytes,
- sm::description("the size of the space occupied by live extents")),
+ sm::description("the size of the space occupied by live extents"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("unavailable_unused_bytes",
[this] { return get_unavailable_unused_bytes(); },
- sm::description("the size of the space is unavailable and not alive")),
+ sm::description("the size of the space is unavailable and not alive"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("projected_count", stats.projected_count,
- sm::description("the number of projected usage reservations")),
+ sm::description("the number of projected usage reservations"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("projected_used_bytes_sum", stats.projected_used_bytes_sum,
- sm::description("the sum of the projected usage in bytes")),
+ sm::description("the sum of the projected usage in bytes"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("reclaimed_bytes", stats.reclaimed_bytes,
- sm::description("rewritten bytes due to reclaim")),
+ sm::description("rewritten bytes due to reclaim"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("reclaimed_segment_bytes", stats.reclaimed_segment_bytes,
- sm::description("rewritten bytes due to reclaim")),
+ sm::description("rewritten bytes due to reclaim"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("closed_journal_used_bytes", stats.closed_journal_used_bytes,
- sm::description("used bytes when close a journal segment")),
+ sm::description("used bytes when close a journal segment"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("closed_journal_total_bytes", stats.closed_journal_total_bytes,
- sm::description("total bytes of closed journal segments")),
+ sm::description("total bytes of closed journal segments"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("closed_ool_used_bytes", stats.closed_ool_used_bytes,
- sm::description("used bytes when close a ool segment")),
+ sm::description("used bytes when close a ool segment"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("closed_ool_total_bytes", stats.closed_ool_total_bytes,
- sm::description("total bytes of closed ool segments")),
+ sm::description("total bytes of closed ool segments"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_gauge("available_ratio",
[this] { return segments.get_available_ratio(); },
- sm::description("ratio of available space to total space")),
+ sm::description("ratio of available space to total space"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_gauge("reclaim_ratio",
[this] { return get_reclaim_ratio(); },
- sm::description("ratio of reclaimable space to unavailable space")),
+ sm::description("ratio of reclaimable space to unavailable space"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_histogram("segment_utilization_distribution",
[this]() -> seastar::metrics::histogram& {
return stats.segment_util;
},
- sm::description("utilization distribution of all segments"))
+ sm::description("utilization distribution of all segments"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))})
});
}
}
RBMCleaner::RBMCleaner(
+ unsigned int store_index,
RBMDeviceGroupRef&& rb_group,
BackrefManager &backref_manager,
bool detailed)
- : detailed(detailed),
+ : store_index(store_index),
+ detailed(detailed),
rb_group(std::move(rb_group)),
backref_manager(backref_manager)
{}
metrics.add_group("rbm_cleaner", {
sm::make_counter("total_bytes",
[this] { return get_total_bytes(); },
- sm::description("the size of the space")),
+ sm::description("the size of the space"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("available_bytes",
[this] { return get_total_bytes() - get_journal_bytes() - stats.used_bytes; },
- sm::description("the size of the space is available")),
+ sm::description("the size of the space is available"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("used_bytes", stats.used_bytes,
- sm::description("the size of the space occupied by live extents")),
+ sm::description("the size of the space occupied by live extents"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))})
});
}
};
JournalTrimmerImpl(
+ unsigned int store_index,
BackrefManager &backref_manager,
config_t config,
backend_type_t type,
seastar::future<> trim();
static JournalTrimmerImplRef create(
+ unsigned int store_index,
BackrefManager &backref_manager,
config_t config,
backend_type_t type,
device_off_t roll_start,
device_off_t roll_size) {
- return std::make_unique<JournalTrimmerImpl>(
+ return std::make_unique<JournalTrimmerImpl>(store_index,
backref_manager, config, type, roll_start, roll_size);
}
return std::min(get_max_dirty_bytes_to_trim(),
config.rewrite_dirty_bytes_per_cycle);
}
- void register_metrics();
+ void register_metrics(unsigned int store_index);
ExtentCallbackInterface *extent_callback = nullptr;
BackgroundListener *background_callback = nullptr;
};
SegmentCleaner(
+ unsigned int store_index,
config_t config,
SegmentManagerGroupRef&& sm_group,
BackrefManager &backref_manager,
}
static SegmentCleanerRef create(
+ unsigned int store_index,
config_t config,
SegmentManagerGroupRef&& sm_group,
BackrefManager &backref_manager,
rewrite_gen_t max_rewrite_generation,
bool detailed,
bool is_cold = false) {
- return std::make_unique<SegmentCleaner>(
+ return std::make_unique<SegmentCleaner>(store_index,
config, std::move(sm_group), backref_manager,
ool_seq_allocator, max_rewrite_generation,
detailed, is_cold);
}
}
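+ // index of the owning store, used to label the cleaner's metrics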
+ unsigned int store_index;
const bool detailed;
const bool is_cold;
const config_t config;
class RBMCleaner : public AsyncCleaner {
public:
RBMCleaner(
+ unsigned int store_index,
RBMDeviceGroupRef&& rb_group,
BackrefManager &backref_manager,
bool detailed);
static RBMCleanerRef create(
+ unsigned int store_index,
RBMDeviceGroupRef&& rb_group,
BackrefManager &backref_manager,
bool detailed) {
- return std::make_unique<RBMCleaner>(
+ return std::make_unique<RBMCleaner>(store_index,
std::move(rb_group), backref_manager, detailed);
}
private:
bool equals(const RBMSpaceTracker &other) const;
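+ // index of the owning store, used to label rbm_cleaner metrics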
+ unsigned int store_index;
const bool detailed;
RBMDeviceGroupRef rb_group;
BackrefManager &backref_manager;
namespace crimson::os::seastore {
Cache::Cache(
- ExtentPlacementManager &epm)
+ ExtentPlacementManager &epm,
+ unsigned int store_index)
: epm(epm),
pinboard(create_extent_pinboard(
crimson::common::get_conf<Option::size_t>(
"seastore_cachepin_size_pershard")))
{
- register_metrics();
+ register_metrics(store_index);
segment_providers_by_device_id.resize(DEVICE_ID_MAX, nullptr);
}
DEBUG("exit");
}
-void Cache::register_metrics()
+void Cache::register_metrics(unsigned int store_index)
{
LOG_PREFIX(Cache::register_metrics);
DEBUG("");
namespace sm = seastar::metrics;
using src_t = Transaction::src_t;
- std::map<src_t, sm::label_instance> labels_by_src {
- {src_t::MUTATE, sm::label_instance("src", "MUTATE")},
- {src_t::READ, sm::label_instance("src", "READ")},
- {src_t::TRIM_DIRTY, sm::label_instance("src", "TRIM_DIRTY")},
- {src_t::TRIM_ALLOC, sm::label_instance("src", "TRIM_ALLOC")},
- {src_t::CLEANER_MAIN, sm::label_instance("src", "CLEANER_MAIN")},
- {src_t::CLEANER_COLD, sm::label_instance("src", "CLEANER_COLD")},
+ std::map<src_t, std::vector<sm::label_instance>> labels_by_src {
+ {src_t::MUTATE, {sm::label_instance("src", "MUTATE")}},
+ {src_t::READ, {sm::label_instance("src", "READ")}},
+ {src_t::TRIM_DIRTY, {sm::label_instance("src", "TRIM_DIRTY")}},
+ {src_t::TRIM_ALLOC, {sm::label_instance("src", "TRIM_ALLOC")}},
+ {src_t::CLEANER_MAIN, {sm::label_instance("src", "CLEANER_MAIN")}},
+ {src_t::CLEANER_COLD, {sm::label_instance("src", "CLEANER_COLD")}},
};
assert(labels_by_src.size() == (std::size_t)src_t::MAX);
- std::map<extent_types_t, sm::label_instance> labels_by_ext {
- {extent_types_t::ROOT, sm::label_instance("ext", "ROOT")},
- {extent_types_t::LADDR_INTERNAL, sm::label_instance("ext", "LADDR_INTERNAL")},
- {extent_types_t::LADDR_LEAF, sm::label_instance("ext", "LADDR_LEAF")},
- {extent_types_t::DINK_LADDR_LEAF, sm::label_instance("ext", "DINK_LADDR_LEAF")},
- {extent_types_t::ROOT_META, sm::label_instance("ext", "ROOT_META")},
- {extent_types_t::OMAP_INNER, sm::label_instance("ext", "OMAP_INNER")},
- {extent_types_t::OMAP_LEAF, sm::label_instance("ext", "OMAP_LEAF")},
- {extent_types_t::ONODE_BLOCK_STAGED, sm::label_instance("ext", "ONODE_BLOCK_STAGED")},
- {extent_types_t::COLL_BLOCK, sm::label_instance("ext", "COLL_BLOCK")},
- {extent_types_t::OBJECT_DATA_BLOCK, sm::label_instance("ext", "OBJECT_DATA_BLOCK")},
- {extent_types_t::RETIRED_PLACEHOLDER, sm::label_instance("ext", "RETIRED_PLACEHOLDER")},
- {extent_types_t::ALLOC_INFO, sm::label_instance("ext", "ALLOC_INFO")},
- {extent_types_t::JOURNAL_TAIL, sm::label_instance("ext", "JOURNAL_TAIL")},
- {extent_types_t::TEST_BLOCK, sm::label_instance("ext", "TEST_BLOCK")},
- {extent_types_t::TEST_BLOCK_PHYSICAL, sm::label_instance("ext", "TEST_BLOCK_PHYSICAL")},
- {extent_types_t::BACKREF_INTERNAL, sm::label_instance("ext", "BACKREF_INTERNAL")},
- {extent_types_t::BACKREF_LEAF, sm::label_instance("ext", "BACKREF_LEAF")}
+ std::map<extent_types_t, std::vector<sm::label_instance>> labels_by_ext {
+ {extent_types_t::ROOT, {sm::label_instance("ext", "ROOT")}},
+ {extent_types_t::LADDR_INTERNAL, {sm::label_instance("ext", "LADDR_INTERNAL")}},
+ {extent_types_t::LADDR_LEAF, {sm::label_instance("ext", "LADDR_LEAF")}},
+ {extent_types_t::DINK_LADDR_LEAF, {sm::label_instance("ext", "DINK_LADDR_LEAF")}},
+ {extent_types_t::ROOT_META, {sm::label_instance("ext", "ROOT_META")}},
+ {extent_types_t::OMAP_INNER, {sm::label_instance("ext", "OMAP_INNER")}},
+ {extent_types_t::OMAP_LEAF, {sm::label_instance("ext", "OMAP_LEAF")}},
+ {extent_types_t::ONODE_BLOCK_STAGED, {sm::label_instance("ext", "ONODE_BLOCK_STAGED")}},
+ {extent_types_t::COLL_BLOCK, {sm::label_instance("ext", "COLL_BLOCK")}},
+ {extent_types_t::OBJECT_DATA_BLOCK, {sm::label_instance("ext", "OBJECT_DATA_BLOCK")}},
+ {extent_types_t::RETIRED_PLACEHOLDER, {sm::label_instance("ext", "RETIRED_PLACEHOLDER")}},
+ {extent_types_t::ALLOC_INFO, {sm::label_instance("ext", "ALLOC_INFO")}},
+ {extent_types_t::JOURNAL_TAIL, {sm::label_instance("ext", "JOURNAL_TAIL")}},
+ {extent_types_t::TEST_BLOCK, {sm::label_instance("ext", "TEST_BLOCK")}},
+ {extent_types_t::TEST_BLOCK_PHYSICAL, {sm::label_instance("ext", "TEST_BLOCK_PHYSICAL")}},
+ {extent_types_t::BACKREF_INTERNAL, {sm::label_instance("ext", "BACKREF_INTERNAL")}},
+ {extent_types_t::BACKREF_LEAF, {sm::label_instance("ext", "BACKREF_LEAF")}}
};
assert(labels_by_ext.size() == (std::size_t)extent_types_t::NONE);
-
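+ // tag every per-src and per-ext label set with this store's index so the
+ // labeled cache metrics below can be told apart per store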
+ for (auto& [src, src_label] : labels_by_src) {
+ src_label.push_back(sm::label_instance("shard_store_index", std::to_string(store_index)));
+ }
+ for (auto& [ext, ext_label] : labels_by_ext) {
+ ext_label.push_back(sm::label_instance("shard_store_index", std::to_string(store_index)));
+ }
/*
* trans_created
*/
[this] {
return stats.access.get_cache_access();
},
- sm::description("total number of cache accesses")
+ sm::description("total number of cache accesses"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"cache_hit",
[this] {
return stats.access.get_cache_hit();
},
- sm::description("total number of cache hits")
+ sm::description("total number of cache hits"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"refresh_parent_total",
cursor_stats.num_refresh_parent_total,
- sm::description("total number of refreshed cursors")
+ sm::description("total number of refreshed cursors"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"refresh_invalid_parent",
cursor_stats.num_refresh_invalid_parent,
- sm::description("total number of refreshed cursors with invalid parents")
+ sm::description("total number of refreshed cursors with invalid parents"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"refresh_unviewable_parent",
cursor_stats.num_refresh_unviewable_parent,
- sm::description("total number of refreshed cursors with unviewable parents")
+ sm::description("total number of refreshed cursors with unviewable parents"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"refresh_modified_viewable_parent",
cursor_stats.num_refresh_modified_viewable_parent,
- sm::description("total number of refreshed cursors with viewable but modified parents")
+ sm::description("total number of refreshed cursors with viewable but modified parents"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
}
);
auto& efforts = get_by_src(stats.invalidated_efforts_by_src, src);
for (auto& [ext, ext_label] : labels_by_ext) {
auto& counter = get_by_ext(efforts.num_trans_invalidated, ext);
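+ // merge the per-src and per-ext labels for this counter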
+ std::vector<sm::label_instance> merged_labels = src_label;
+ merged_labels.insert(merged_labels.end(), ext_label.begin(), ext_label.end());
metrics.add_group(
"cache",
{
"trans_invalidated_by_extent",
counter,
sm::description("total number of transactions invalidated by extents"),
- {src_label, ext_label}
+ merged_labels
),
}
);
if (src == src_t::READ) {
// read transaction won't have non-read efforts
auto read_effort_label = effort_label("READ");
+ std::vector<sm::label_instance> merged_labels = src_label;
+ merged_labels.push_back(read_effort_label);
metrics.add_group(
"cache",
{
"invalidated_extents",
efforts.read.num,
sm::description("extents of invalidated transactions"),
- {src_label, read_effort_label}
+ merged_labels
),
sm::make_counter(
"invalidated_extent_bytes",
efforts.read.bytes,
sm::description("extent bytes of invalidated transactions"),
- {src_label, read_effort_label}
+ merged_labels
),
}
);
return efforts.fresh_ool_written;
}
}();
+ std::vector<sm::label_instance> merged_labels = src_label;
+ merged_labels.push_back(effort_label(effort_name));
metrics.add_group(
"cache",
{
"invalidated_extents",
effort.num,
sm::description("extents of invalidated transactions"),
- {src_label, effort_label(effort_name)}
+ merged_labels
),
sm::make_counter(
"invalidated_extent_bytes",
effort.bytes,
sm::description("extent bytes of invalidated transactions"),
- {src_label, effort_label(effort_name)}
+ merged_labels
),
}
);
return efforts.fresh_ool_by_ext;
}
}();
+ std::vector<sm::label_instance> base_labels = src_label;
+ base_labels.push_back(effort_label(effort_name));
for (auto& [ext, ext_label] : labels_by_ext) {
+ // copy the base labels per extent type so labels do not accumulate
+ // across iterations
+ std::vector<sm::label_instance> merged_labels = base_labels;
+ merged_labels.insert(merged_labels.end(), ext_label.begin(), ext_label.end());
auto& effort = get_by_ext(effort_by_ext, ext);
metrics.add_group(
"cache",
"committed_extents",
effort.num,
sm::description("extents of committed transactions"),
- {src_label, effort_label(effort_name), ext_label}
+ merged_labels
),
sm::make_counter(
"committed_extent_bytes",
effort.bytes,
sm::description("extent bytes of committed transactions"),
- {src_label, effort_label(effort_name), ext_label}
+ merged_labels
),
}
);
auto& delta_by_ext = efforts.delta_bytes_by_ext;
for (auto& [ext, ext_label] : labels_by_ext) {
+ std::vector<sm::label_instance> merged_labels = src_label;
+ merged_labels.insert(merged_labels.end(), ext_label.begin(), ext_label.end());
auto& value = get_by_ext(delta_by_ext, ext);
metrics.add_group(
"cache",
"committed_delta_bytes",
value,
sm::description("delta bytes of committed transactions"),
- {src_label, ext_label}
+ merged_labels
),
}
);
sm::make_counter(
"trans_read_successful",
stats.success_read_efforts.num_trans,
- sm::description("total number of successful read transactions")
+ sm::description("total number of successful read transactions"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"successful_read_extents",
stats.success_read_efforts.read.num,
- sm::description("extents of successful read transactions")
+ sm::description("extents of successful read transactions"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"successful_read_extent_bytes",
stats.success_read_efforts.read.bytes,
- sm::description("extent bytes of successful read transactions")
+ sm::description("extent bytes of successful read transactions"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
}
);
[this] {
return extents_index.size();
},
- sm::description("total number of cached extents")
+ sm::description("total number of cached extents"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"cached_extent_bytes",
[this] {
return extents_index.get_bytes();
},
- sm::description("total bytes of cached extents")
+ sm::description("total bytes of cached extents"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"dirty_extents",
[this] {
return dirty.size();
},
- sm::description("total number of dirty extents")
+ sm::description("total number of dirty extents"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"dirty_extent_bytes",
stats.dirty_bytes,
- sm::description("total bytes of dirty extents")
+ sm::description("total bytes of dirty extents"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
}
);
- pinboard->register_metrics();
+ pinboard->register_metrics(store_index);
/**
* tree stats
auto omap_label = tree_label("OMAP");
auto lba_label = tree_label("LBA");
auto backref_label = tree_label("BACKREF");
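+ // store_index is captured by value so the tree metrics below can be labeled per store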
- auto register_tree_metrics = [&labels_by_src, &onode_label, &omap_label, this](
+ auto register_tree_metrics = [&labels_by_src, &onode_label, &omap_label, store_index, this](
const sm::label_instance& tree_label,
uint64_t& tree_depth,
int64_t& tree_extents_num,
"tree_depth",
tree_depth,
sm::description("the depth of tree"),
- {tree_label}
+ {tree_label, sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"tree_extents_num",
tree_extents_num,
sm::description("num of extents of the tree"),
- {tree_label}
+ {tree_label, sm::label_instance("shard_store_index", std::to_string(store_index))}
)
}
);
}
auto& committed_efforts = get_by_src(committed_tree_efforts, src);
auto& invalidated_efforts = get_by_src(invalidated_tree_efforts, src);
+ std::vector<sm::label_instance> merged_labels = {tree_label}; // tree label first, then the per-src labels
+ merged_labels.insert(merged_labels.end(), src_label.begin(), src_label.end());
metrics.add_group(
"cache",
{
"tree_inserts_committed",
committed_efforts.num_inserts,
sm::description("total number of committed insert operations"),
- {tree_label, src_label}
+ merged_labels
),
sm::make_counter(
"tree_erases_committed",
committed_efforts.num_erases,
sm::description("total number of committed erase operations"),
- {tree_label, src_label}
+ merged_labels
),
sm::make_counter(
"tree_updates_committed",
committed_efforts.num_updates,
sm::description("total number of committed update operations"),
- {tree_label, src_label}
+ merged_labels
),
sm::make_counter(
"tree_inserts_invalidated",
invalidated_efforts.num_inserts,
sm::description("total number of invalidated insert operations"),
- {tree_label, src_label}
+ merged_labels
),
sm::make_counter(
"tree_erases_invalidated",
invalidated_efforts.num_erases,
sm::description("total number of invalidated erase operations"),
- {tree_label, src_label}
+ merged_labels
),
sm::make_counter(
"tree_updates_invalidated",
invalidated_efforts.num_updates,
sm::description("total number of invalidated update operations"),
- {tree_label, src_label}
+ merged_labels
),
}
);
"trans_srcs_invalidated",
stats.trans_conflicts_by_srcs[srcs_index - 1],
sm::description("total number conflicted transactions by src pair"),
- {srcs_label(oss.str())}
+ {srcs_label(oss.str()),
+ sm::label_instance("shard_store_index", std::to_string(store_index))}
),
}
);
"trans_srcs_invalidated",
stats.trans_conflicts_by_unknown[srcs_index - 1],
sm::description("total number conflicted transactions by src pair"),
- {srcs_label(oss.str())}
+ {srcs_label(oss.str()),
+ sm::label_instance("shard_store_index", std::to_string(store_index))}
),
}
);
[this] {
return stats.trim_rewrites.get_num_rewrites();
},
- sm::description("total number of rewrite-dirty extents")
+ sm::description("total number of rewrite-dirty extents"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"version_sum_dirty",
stats.trim_rewrites.dirty_version,
- sm::description("sum of the version from rewrite-dirty extents")
+ sm::description("sum of the version from rewrite-dirty extents"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"version_count_reclaim",
[this] {
return stats.reclaim_rewrites.get_num_rewrites();
},
- sm::description("total number of rewrite-reclaim extents")
+ sm::description("total number of rewrite-reclaim extents"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"version_sum_reclaim",
stats.reclaim_rewrites.dirty_version,
- sm::description("sum of the version from rewrite-reclaim extents")
+ sm::description("sum of the version from rewrite-reclaim extents"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
}
);
class Cache : public ExtentTransViewRetriever,
public RetiredExtentPlaceholderInvalidater {
public:
- Cache(ExtentPlacementManager &epm);
+ Cache(ExtentPlacementManager &epm, unsigned int store_index);
~Cache();
cache_stats_t get_stats(bool report_detail, double seconds) const;
}
seastar::metrics::metric_group metrics;
- void register_metrics();
+ void register_metrics(unsigned int store_index);
void apply_backref_mset(
backref_entry_refs_t& backref_entries) {
return lru.get_current_num_extents();
}
- void register_metrics() final {
+ void register_metrics(unsigned int store_index) final {
namespace sm = seastar::metrics;
metrics.add_group(
"cache",
[this] {
return get_current_size_bytes();
},
- sm::description("total bytes pinned by the lru")
+ sm::description("total bytes pinned by the lru"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"lru_num_extents",
[this] {
return get_current_num_extents();
},
- sm::description("total extents pinned by the lru")
+ sm::description("total extents pinned by the lru"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"lru_hit", hit,
- sm::description("total count of the extents that are linked to lru when touching them")
+ sm::description("total count of the extents that are linked to lru when touching them"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"lru_miss", miss,
- sm::description("total count of the extents that are not linked to lru when touching them")
+ sm::description("total count of the extents that are not linked to lru when touching them"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
}
);
return warm_in.get_current_num_extents() + hot.get_current_num_extents();
}
- void register_metrics() final;
+ void register_metrics(unsigned int store_index) final;
void get_stats(
cache_stats_t &stats,
last_hits = overall_hits;
}
-void ExtentPinboardTwoQ::register_metrics() {
+void ExtentPinboardTwoQ::register_metrics(unsigned int store_index) {
namespace sm = seastar::metrics;
metrics.add_group(
"cache",
[this] {
return warm_in.get_current_size_bytes();
},
- sm::description("total bytes pinned by the 2q warm_in queue")
+ sm::description("total bytes pinned by the 2q warm_in queue"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"2q_warm_in_num_extents",
[this] {
return warm_in.get_current_num_extents();
},
- sm::description("total extents pinned by the 2q warm_in queue")
+ sm::description("total extents pinned by the 2q warm_in queue"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"2q_hot_size_bytes",
[this] {
return hot.get_current_size_bytes();
},
- sm::description("total bytes pinned by the 2q hot queue")
+ sm::description("total bytes pinned by the 2q hot queue"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"2q_hot_num_extents",
[this] {
return hot.get_current_num_extents();
},
- sm::description("total extents pinned by the 2q hot queue")
+ sm::description("total extents pinned by the 2q hot queue"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"2q_hit", hit,
- sm::description("total count of the extents that are linked to 2Q when touching them")
+ sm::description("total count of the extents that are linked to 2Q when touching them"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"2q_miss", miss,
- sm::description("total count of the extents that are not linked to 2Q when touching them")
+ sm::description("total count of the extents that are not linked to 2Q when touching them"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
}
);
struct ExtentPinboard {
virtual ~ExtentPinboard() = default;
- virtual void register_metrics() = 0;
+ virtual void register_metrics(unsigned int store_index) = 0;
virtual void move_to_top(
CachedExtent &extent,
const Transaction::src_t *p_src,
namespace crimson::os::seastore {
SegmentedOolWriter::SegmentedOolWriter(
+ unsigned int store_index,
data_category_t category,
rewrite_gen_t gen,
SegmentProvider& sp,
SegmentSeqAllocator &ssa)
- : segment_allocator(nullptr, category, gen, sp, ssa),
+ : store_index(store_index),
+ segment_allocator(nullptr, category, gen, sp, ssa),
record_submitter(crimson::common::get_conf<uint64_t>(
"seastore_journal_iodepth_limit"),
crimson::common::get_conf<uint64_t>(
// DATA
data_writers_by_gen.resize(num_writers, nullptr);
for (rewrite_gen_t gen = OOL_GENERATION; gen < hot_tier_generations; ++gen) {
- writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
+ writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(store_index,
data_category_t::DATA, gen, *segment_cleaner,
*ool_segment_seq_allocator));
data_writers_by_gen[generation_to_writer(gen)] = writer_refs.back().get();
// METADATA
md_writers_by_gen.resize(num_writers, {});
for (rewrite_gen_t gen = OOL_GENERATION; gen < hot_tier_generations; ++gen) {
- writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
+ writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(store_index,
data_category_t::METADATA, gen, *segment_cleaner,
*ool_segment_seq_allocator));
md_writers_by_gen[generation_to_writer(gen)] = writer_refs.back().get();
if (cold_segment_cleaner) {
// Cold DATA Segments
for (rewrite_gen_t gen = hot_tier_generations; gen <= dynamic_max_rewrite_generation; ++gen) {
- writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
+ writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(store_index,
data_category_t::DATA, gen, *cold_segment_cleaner,
*ool_segment_seq_allocator));
data_writers_by_gen[generation_to_writer(gen)] = writer_refs.back().get();
}
for (rewrite_gen_t gen = hot_tier_generations; gen <= dynamic_max_rewrite_generation; ++gen) {
// Cold METADATA Segments
- writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
+ writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(store_index,
data_category_t::METADATA, gen, *cold_segment_cleaner,
*ool_segment_seq_allocator));
md_writers_by_gen[generation_to_writer(gen)] = writer_refs.back().get();
}
}
-ExtentPlacementManager::mount_ret ExtentPlacementManager::BackgroundProcess::mount() {
+ExtentPlacementManager::mount_ret ExtentPlacementManager::BackgroundProcess::mount(unsigned int store_index) {
LOG_PREFIX(BackgroundProcess::mount);
DEBUG("start");
ceph_assert(state == state_t::STOP);
state = state_t::MOUNT;
trimmer->reset();
stats = {};
- register_metrics();
+ register_metrics(store_index);
DEBUG("mounting main cleaner");
co_await main_cleaner->mount();
if (has_cold_tier()) {
}
}
-void ExtentPlacementManager::BackgroundProcess::register_metrics()
+void ExtentPlacementManager::BackgroundProcess::register_metrics(unsigned int store_index)
{
namespace sm = seastar::metrics;
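+ // background_process metrics are also labeled with the store index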
metrics.add_group("background_process", {
sm::make_counter("io_count", stats.io_count,
- sm::description("the sum of IOs")),
+ sm::description("the sum of IOs"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("io_blocked_count", stats.io_blocked_count,
- sm::description("IOs that are blocked by gc")),
+ sm::description("IOs that are blocked by gc"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("io_blocked_count_trim", stats.io_blocked_count_trim,
- sm::description("IOs that are blocked by trimming")),
+ sm::description("IOs that are blocked by trimming"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("io_retried_blocked_count_clean", stats.io_blocked_count_clean,
- sm::description("Retried IOs that are blocked by cleaning")),
+ sm::description("Retried IOs that are blocked by cleaning"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("io_retried_blocked_count_trim", stats.io_blocked_count_trim,
- sm::description("Retried IOs that are blocked by trimming")),
+ sm::description("Retried IOs that are blocked by trimming"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("io_blocked_count_clean", stats.io_blocked_count_clean,
- sm::description("IOs that are blocked by cleaning")),
+ sm::description("IOs that are blocked by cleaning"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("io_blocked_sum", stats.io_blocked_sum,
- sm::description("the sum of blocking IOs")),
+ sm::description("the sum of blocking IOs"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}),
sm::make_counter("io_blocked_time", stats.io_blocked_time,
- sm::description("the sum of the time(ms) in which IOs are blocked"))
+ sm::description("the sum of the time(ms) in which IOs are blocked"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))})
});
}
*/
class SegmentedOolWriter : public ExtentOolWriter {
public:
- SegmentedOolWriter(data_category_t category,
+ SegmentedOolWriter(unsigned int store_index,
+ data_category_t category,
rewrite_gen_t gen,
SegmentProvider &sp,
SegmentSeqAllocator &ssa);
}
open_ertr::future<> open() final {
- return record_submitter.open(false).discard_result();
+ return record_submitter.open(store_index, false).discard_result();
}
alloc_write_iertr::future<> alloc_write_ool_extents(
std::list<LogicalCachedExtentRef> &&extents,
bool with_atomic_roll_segment=false);
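+ // index of the owning store, passed to RecordSubmitter::open() so the
+ // journal metrics carry a "shard_store_index" label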
+ unsigned int store_index;
journal::SegmentAllocator segment_allocator;
journal::RecordSubmitter record_submitter;
seastar::gate write_guard;
public:
ExtentPlacementManager(
rewrite_gen_t hot_tier_generations,
- rewrite_gen_t cold_tier_generations)
+ rewrite_gen_t cold_tier_generations,
+ unsigned int store_index)
: hot_tier_generations(hot_tier_generations),
cold_tier_generations(cold_tier_generations),
dynamic_max_rewrite_generation(cold_tier_generations),
+ store_index(store_index),
ool_segment_seq_allocator(
std::make_unique<SegmentSeqAllocator>(segment_type_t::OOL)),
max_data_allocation_size(crimson::common::get_conf<Option::size_t>(
crimson::ct_error::input_output_error>;
using mount_ret = mount_ertr::future<>;
mount_ret mount() {
- return background_process.mount();
+ return background_process.mount(store_index);
}
using open_ertr = ExtentOolWriter::open_ertr;
return stat;
}
- ExtentPlacementManager::mount_ret mount();
+ ExtentPlacementManager::mount_ret mount(unsigned int store_index);
void start_scan_space() {
ceph_assert(state == state_t::MOUNT);
seastar::future<> do_background_cycle();
- void register_metrics();
+ void register_metrics(unsigned int store_index);
struct {
uint64_t io_blocking_num = 0;
const rewrite_gen_t cold_tier_generations = NULL_GENERATION;
rewrite_gen_t dynamic_max_rewrite_generation = NULL_GENERATION;
BackgroundProcess background_process;
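+ // index of the owning store, forwarded to BackgroundProcess::mount() for
+ // metrics registration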
+ unsigned int store_index = 0;
// TODO: drop once paddr->journal_seq_t is introduced
SegmentSeqAllocatorRef ool_segment_seq_allocator;
extent_len_t max_data_allocation_size = 0;
namespace crimson::os::seastore::journal {
JournalRef make_segmented(
+ unsigned int store_index,
SegmentProvider &provider,
JournalTrimmer &trimmer)
{
- return std::make_unique<SegmentedJournal>(provider, trimmer);
+ return std::make_unique<SegmentedJournal>(store_index, provider, trimmer);
}
JournalRef make_circularbounded(
+ unsigned int store_index,
JournalTrimmer &trimmer,
crimson::os::seastore::random_block_device::RBMDevice* device,
std::string path)
{
- return std::make_unique<CircularBoundedJournal>(trimmer, device, path);
+ return std::make_unique<CircularBoundedJournal>(store_index, trimmer, device, path);
}
}
namespace journal {
JournalRef make_segmented(
+ unsigned int store_index,
SegmentProvider &provider,
JournalTrimmer &trimmer);
JournalRef make_circularbounded(
+ unsigned int store_index,
JournalTrimmer &trimmer,
crimson::os::seastore::random_block_device::RBMDevice* device,
std::string path);
namespace crimson::os::seastore::journal {
CircularBoundedJournal::CircularBoundedJournal(
+ unsigned int store_index,
JournalTrimmer &trimmer,
RBMDevice* device,
const std::string &path)
- : trimmer(trimmer), path(path),
+ : store_index(store_index),
+ trimmer(trimmer), path(path),
cjs(device),
record_submitter(crimson::common::get_conf<uint64_t>(
"seastore_journal_iodepth_limit"),
CircularBoundedJournal::open_for_mkfs_ret
CircularBoundedJournal::open_for_mkfs()
{
- return record_submitter.open(true
+ return record_submitter.open(store_index, true
).safe_then([this](auto ret) {
return open_for_mkfs_ret(
open_for_mkfs_ertr::ready_future_marker{},
CircularBoundedJournal::open_for_mount_ret
CircularBoundedJournal::open_for_mount()
{
- return record_submitter.open(false
+ return record_submitter.open(store_index, false
).safe_then([this](auto ret) {
return open_for_mount_ret(
open_for_mount_ertr::ready_future_marker{},
class CircularBoundedJournal : public Journal, RecordScanner {
public:
CircularBoundedJournal(
+ unsigned int store_index,
JournalTrimmer &trimmer, RBMDevice* device, const std::string &path);
~CircularBoundedJournal() {}
}
private:
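+ // index of the owning store, passed to RecordSubmitter::open() for metric labels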
+ unsigned int store_index;
JournalTrimmer &trimmer;
std::string path;
WritePipeline *write_pipeline = nullptr;
}
RecordSubmitter::open_ret
-RecordSubmitter::open(bool is_mkfs)
+RecordSubmitter::open(unsigned int store_index, bool is_mkfs)
{
return journal_allocator.open(is_mkfs
- ).safe_then([this](journal_seq_t ret) {
+ ).safe_then([this, store_index](journal_seq_t ret) {
LOG_PREFIX(RecordSubmitter::open);
DEBUG("{} register metrics", get_name());
stats = {};
namespace sm = seastar::metrics;
std::vector<sm::label_instance> label_instances;
label_instances.push_back(sm::label_instance("submitter", get_name()));
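+ // journal metrics are labeled by both the submitter name and the store index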
+ label_instances.push_back(sm::label_instance("shard_store_index", std::to_string(store_index)));
+
metrics.add_group(
"journal",
{
// open for write, generate the correct print name, and register metrics
using open_ertr = base_ertr;
using open_ret = open_ertr::future<journal_seq_t>;
- open_ret open(bool is_mkfs);
+ open_ret open(unsigned int store_index, bool is_mkfs);
using close_ertr = base_ertr;
close_ertr::future<> close();
namespace crimson::os::seastore::journal {
SegmentedJournal::SegmentedJournal(
+ unsigned int store_index,
SegmentProvider &segment_provider,
JournalTrimmer &trimmer)
- : segment_seq_allocator(
+ : store_index(store_index),
+ segment_seq_allocator(
new SegmentSeqAllocator(segment_type_t::JOURNAL)),
journal_segment_allocator(&trimmer,
data_category_t::METADATA,
SegmentedJournal::open_for_mkfs_ret
SegmentedJournal::open_for_mkfs()
{
- return record_submitter.open(true);
+ return record_submitter.open(store_index, true);
}
SegmentedJournal::open_for_mount_ret
SegmentedJournal::open_for_mount()
{
- return record_submitter.open(false);
+ return record_submitter.open(store_index, false);
}
SegmentedJournal::close_ertr::future<> SegmentedJournal::close()
class SegmentedJournal : public Journal {
public:
SegmentedJournal(
+ unsigned int store_index,
SegmentProvider &segment_provider,
JournalTrimmer &trimmer);
~SegmentedJournal() {}
OrderingHandle &handle,
on_submission_func_t &&on_submission);
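+ // index of the owning store, passed to RecordSubmitter::open() for metric labels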
+ unsigned int store_index;
SegmentSeqAllocatorRef segment_seq_allocator;
SegmentAllocator journal_segment_allocator;
RecordSubmitter record_submitter;
});
}
-void BtreeLBAManager::register_metrics()
+void BtreeLBAManager::register_metrics(unsigned int store_index)
{
LOG_PREFIX(BtreeLBAManager::register_metrics);
DEBUG("start");
sm::make_counter(
"alloc_extents",
stats.num_alloc_extents,
- sm::description("total number of lba alloc_extent operations")
+ sm::description("total number of lba alloc_extent operations"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
sm::make_counter(
"alloc_extents_iter_nexts",
stats.num_alloc_extents_iter_nexts,
- sm::description("total number of iterator next operations during extent allocation")
+ sm::description("total number of iterator next operations during extent allocation"),
+ {sm::label_instance("shard_store_index", std::to_string(store_index))}
),
}
);
*/
class BtreeLBAManager : public LBAManager {
public:
- BtreeLBAManager(Cache &cache)
+ BtreeLBAManager(Cache &cache, unsigned int store_index)
: cache(cache)
{
- register_metrics();
+ register_metrics(store_index);
}
mkfs_ret mkfs(
}
seastar::metrics::metric_group metrics;
- void register_metrics();
+ void register_metrics(unsigned int store_index);
struct update_mapping_ret_bare_t {
update_mapping_ret_bare_t()
namespace crimson::os::seastore {
-LBAManagerRef lba::create_lba_manager(Cache &cache) {
- return LBAManagerRef(new lba::BtreeLBAManager(cache));
+LBAManagerRef lba::create_lba_manager(Cache &cache, unsigned int store_index) {
+ return LBAManagerRef(new lba::BtreeLBAManager(cache, store_index));
}
}
class Cache;
namespace lba {
-LBAManagerRef create_lba_manager(Cache &cache);
+LBAManagerRef create_lba_manager(Cache &cache, unsigned int store_index);
}
}
Device *primary_device,
const std::vector<Device*> &secondary_devices,
shard_stats_t& shard_stats,
+ unsigned int store_index,
bool is_test)
{
rewrite_gen_t hot_tier_generations = crimson::common::get_conf<uint64_t>(
rewrite_gen_t cold_tier_generations = crimson::common::get_conf<uint64_t>(
"seastore_cold_tier_generations");
auto epm = std::make_unique<ExtentPlacementManager>(
- hot_tier_generations, cold_tier_generations);
- auto cache = std::make_unique<Cache>(*epm);
- auto lba_manager = lba::create_lba_manager(*cache);
+ hot_tier_generations, cold_tier_generations, store_index);
+ auto cache = std::make_unique<Cache>(*epm, store_index);
+ auto lba_manager = lba::create_lba_manager(*cache, store_index);
auto sms = std::make_unique<SegmentManagerGroup>();
auto rbs = std::make_unique<RBMDeviceGroup>();
auto backref_manager = create_backref_manager(*cache);
}
auto journal_trimmer = JournalTrimmerImpl::create(
+ store_index,
*backref_manager, trimmer_config,
backend_type, roll_start, roll_size);
if (cold_sms) {
cold_segment_cleaner = SegmentCleaner::create(
+ store_index,
cleaner_config,
std::move(cold_sms),
*backref_manager,
if (backend_type == backend_type_t::SEGMENTED) {
cleaner = SegmentCleaner::create(
+ store_index,
cleaner_config,
std::move(sms),
*backref_manager,
}
segment_cleaner->set_journal_trimmer(*journal_trimmer);
journal = journal::make_segmented(
+ store_index,
*segment_cleaner,
*journal_trimmer);
} else {
cleaner = RBMCleaner::create(
+ store_index,
std::move(rbs),
*backref_manager,
cleaner_is_detailed);
journal = journal::make_circularbounded(
+ store_index,
*journal_trimmer,
static_cast<random_block_device::RBMDevice*>(primary_device),
"");
Device *primary_device,
const std::vector<Device*> &secondary_devices,
shard_stats_t& shard_stats,
+ unsigned int store_index,
bool is_test);
}