SegmentSeqAllocatorRef ool_segment_seq_allocator;
};
+ class RBMCleaner;
+ using RBMCleanerRef = std::unique_ptr<RBMCleaner>;
+
+ /**
+  * RBMCleaner
+  *
+  * AsyncCleaner implementation backed by a group of RandomBlockManager
+  * devices.  Unlike the segment-based cleaner, RBM space is reusable in
+  * place, so the should_block_io_on_clean()/should_clean_space()
+  * predicates below are constant false; several accounting interfaces
+  * are still TODO.
+  */
+ class RBMCleaner : public AsyncCleaner {
+ public:
+   RBMCleaner(
+     RBMDeviceGroupRef&& rb_group,
+     BackrefManager &backref_manager,
+     bool detailed);
+
+   /// Factory helper; takes ownership of rb_group.
+   static RBMCleanerRef create(
+     RBMDeviceGroupRef&& rb_group,
+     BackrefManager &backref_manager,
+     bool detailed) {
+     return std::make_unique<RBMCleaner>(
+       std::move(rb_group), backref_manager, detailed);
+   }
+
+   /// Non-owning access to the underlying device group.
+   RBMDeviceGroup* get_rb_group() {
+     return rb_group.get();
+   }
+
+   /*
+    * AsyncCleaner interfaces
+    */
+
+   void set_background_callback(BackgroundListener *cb) final {
+     background_callback = cb;
+   }
+
+   void set_extent_callback(ExtentCallbackInterface *cb) final {
+     extent_callback = cb;
+   }
+
+   // Returns a default-constructed statfs until accounting is wired up.
+   store_statfs_t get_stat() const final {
+     store_statfs_t st;
+     // TODO
+     return st;
+   }
+
+   void print(std::ostream &, bool is_detailed) const final;
+
+   mount_ret mount() final;
+
+   void mark_space_used(paddr_t, extent_len_t) final;
+
+   void mark_space_free(paddr_t, extent_len_t) final;
+
+   void commit_space_used(paddr_t, extent_len_t) final;
+
+   void reserve_projected_usage(std::size_t) final;
+
+   // NOTE: qualified std::size_t for consistency with
+   // reserve_projected_usage() above.
+   void release_projected_usage(std::size_t) final;
+
+   // RBM reuses freed space directly, so IO never needs to wait on
+   // cleaning and no background clean pass is required.
+   bool should_block_io_on_clean() const final {
+     return false;
+   }
+
+   bool should_clean_space() const final {
+     return false;
+   }
+
+   clean_space_ret clean_space() final;
+
+   /// Look up the RandomBlockManager owning paddr's device id;
+   /// nullptr if no manager in the group matches.
+   RandomBlockManager* get_rbm(paddr_t paddr) {
+     auto rbs = rb_group->get_rb_managers();
+     for (auto p : rbs) {
+       if (p->get_device_id() == paddr.get_device_id()) {
+         return p;
+       }
+     }
+     return nullptr;
+   }
+
+   /// Allocate length bytes; currently always served by the first
+   /// device in the group.
+   paddr_t alloc_paddr(extent_len_t length) {
+     // TODO: implement allocation strategy (dirty metadata and multiple devices)
+     auto rbs = rb_group->get_rb_managers();
+     return rbs[0]->alloc_extent(length);
+   }
+
+   // Testing interfaces
+
+   bool check_usage() final;
+
+   bool check_usage_is_empty() const final {
+     // TODO
+     return true;
+   }
+
+ private:
+   bool equals(const RBMSpaceTracker &other) const;
+
+   const bool detailed;
+   RBMDeviceGroupRef rb_group;
+   BackrefManager &backref_manager;
+
+   struct {
+     /**
+      * used_bytes
+      *
+      * Bytes occupied by live extents
+      */
+     uint64_t used_bytes = 0;
+
+     /**
+      * projected_used_bytes
+      *
+      * Sum of projected bytes used by each transaction between throttle
+      * acquisition and commit completion. See reserve_projected_usage()
+      */
+     uint64_t projected_used_bytes = 0;
+   } stats;
+
+   ExtentCallbackInterface *extent_callback = nullptr;
+   BackgroundListener *background_callback = nullptr;
+ };
}
+
+#if FMT_VERSION >= 90000
+// fmt v9 dropped implicit formatting of types via their operator<<;
+// every type passed through fmt must explicitly opt in by specializing
+// fmt::formatter with ostream_formatter.
+template <> struct fmt::formatter<crimson::os::seastore::segment_info_t> : fmt::ostream_formatter {};
+template <> struct fmt::formatter<crimson::os::seastore::segments_info_t> : fmt::ostream_formatter {};
+template <> struct fmt::formatter<crimson::os::seastore::AsyncCleaner::stat_printer_t> : fmt::ostream_formatter {};
+template <> struct fmt::formatter<crimson::os::seastore::JournalTrimmerImpl::stat_printer_t> : fmt::ostream_formatter {};
+#endif
assert(is_target_rewrite_generation(gen));
assert(gen == INIT_GENERATION || hint == placement_hint_t::REWRITE);
- // XXX: bp might be extended to point to differnt memory (e.g. PMem)
+ // XXX: bp might be extended to point to different memory (e.g. PMem)
// according to the allocator.
- auto bp = ceph::bufferptr(
+ auto alloc_paddr = [this](rewrite_gen_t gen,
+ data_category_t category, extent_len_t length)
+ -> alloc_result_t {
+ auto bp = ceph::bufferptr(
buffer::create_page_aligned(length));
- bp.zero();
+ bp.zero();
+ paddr_t addr;
+ if (gen == INLINE_GENERATION) {
+ addr = make_record_relative_paddr(0);
+ } else if (category == data_category_t::DATA) {
+ assert(data_writers_by_gen[generation_to_writer(gen)]);
+ addr = data_writers_by_gen[
+ generation_to_writer(gen)]->alloc_paddr(length);
+ } else {
+ assert(category == data_category_t::METADATA);
+ assert(md_writers_by_gen[generation_to_writer(gen)]);
+ addr = md_writers_by_gen[
+ generation_to_writer(gen)]->alloc_paddr(length);
+ }
+ return {addr,
+ std::move(bp),
+ gen};
+ };
if (!is_logical_type(type)) {
// TODO: implement out-of-line strategy for physical extent.