os << ")";
}
+// RBMCleaner: AsyncCleaner implementation for the random-block (RBM)
+// backend.  Stores a reference to the backref manager for later use;
+// 'detailed' selects verbose output in print().
+RBMCleaner::RBMCleaner(
+  BackrefManager &backref_manager,
+  bool detailed)
+  : detailed(detailed),
+    backref_manager(backref_manager)
+{}
+
+// Dump cleaner state to 'os'; 'is_detailed' selects extra verbosity.
+// Not implemented yet — intentionally prints nothing.
+void RBMCleaner::print(std::ostream &os, bool is_detailed) const
+{
+  // TODO
+  return;
+}
+
+// Record [addr, addr + len) as occupied by a live extent.
+// Usage tracking is not implemented yet; stats.used_bytes is never
+// updated, so get_stat()/check_usage() cannot report real usage.
+void RBMCleaner::mark_space_used(
+  paddr_t addr,
+  extent_len_t len)
+{
+  // TODO
+  return;
+}
+
+// Record [addr, addr + len) as released (extent no longer live).
+// Counterpart to mark_space_used(); not implemented yet.
+void RBMCleaner::mark_space_free(
+  paddr_t addr,
+  extent_len_t len)
+{
+  // TODO
+  return;
+}
+
+// Account 'projected_usage' bytes a transaction expects to consume
+// before it commits.  Must only be called once the background callback
+// reports ready.  Balanced by release_projected_usage().
+void RBMCleaner::reserve_projected_usage(std::size_t projected_usage)
+{
+  assert(background_callback->is_ready());
+  stats.projected_used_bytes += projected_usage;
+}
+
+// Drop a reservation previously made via reserve_projected_usage().
+// Releasing more than is reserved is a logic error (ceph_assert), and
+// freed headroom may unblock IO waiting on space.
+void RBMCleaner::release_projected_usage(std::size_t projected_usage)
+{
+  assert(background_callback->is_ready());
+  ceph_assert(stats.projected_used_bytes >= projected_usage);
+  stats.projected_used_bytes -= projected_usage;
+  background_callback->maybe_wake_blocked_io();
+}
+
+// Reclaim space from dead extents.  No cleaning policy is implemented
+// yet, so this completes immediately (consistent with
+// should_clean_space() returning false).
+RBMCleaner::clean_space_ret RBMCleaner::clean_space()
+{
+  // TODO
+  return clean_space_ertr::now();
+}
+
+// Mount-time initialization: reset in-memory accounting.  No on-disk
+// state is recovered yet (usage tracking is still TODO).
+RBMCleaner::mount_ret RBMCleaner::mount()
+{
+  stats = {};
+  return mount_ertr::now();
+}
+
}
SegmentSeqAllocatorRef ool_segment_seq_allocator;
};
+class RBMCleaner;
+using RBMCleanerRef = std::unique_ptr<RBMCleaner>;
+
+/**
+ * RBMCleaner
+ *
+ * AsyncCleaner implementation for the random-block (RBM) backed,
+ * non-segmented backend.  Currently a skeleton: projected-usage
+ * accounting works, but live-usage tracking, cleaning and statfs
+ * reporting are still TODO stubs.
+ */
+class RBMCleaner : public AsyncCleaner {
+public:
+  RBMCleaner(
+    BackrefManager &backref_manager,
+    bool detailed);
+
+  static RBMCleanerRef create(
+    BackrefManager &backref_manager,
+    bool detailed) {
+    return std::make_unique<RBMCleaner>(
+      backref_manager, detailed);
+  }
+
+  /*
+   * AsyncCleaner interfaces
+   */
+
+  void set_background_callback(BackgroundListener *cb) final {
+    background_callback = cb;
+  }
+
+  void set_extent_callback(ExtentCallbackInterface *cb) final {
+    extent_callback = cb;
+  }
+
+  store_statfs_t get_stat() const final {
+    store_statfs_t st;
+    // TODO: populate from stats once usage tracking is implemented
+    return st;
+  }
+
+  void print(std::ostream &, bool is_detailed) const final;
+
+  mount_ret mount() final;
+
+  void mark_space_used(paddr_t, extent_len_t) final;
+
+  void mark_space_free(paddr_t, extent_len_t) final;
+
+  void reserve_projected_usage(std::size_t) final;
+
+  void release_projected_usage(std::size_t) final;
+
+  // Cleaning is not implemented yet, so IO is never blocked on it.
+  bool should_block_io_on_clean() const final {
+    return false;
+  }
+
+  // No cleaning policy yet; never request background cleaning.
+  bool should_clean_space() const final {
+    return false;
+  }
+
+  clean_space_ret clean_space() final;
+
+  // Testing interfaces
+
+  bool check_usage() final {
+    // TODO
+    return true;
+  }
+
+  bool check_usage_is_empty() const final {
+    // TODO
+    return true;
+  }
+
+private:
+  // Whether print() should emit verbose detail.
+  const bool detailed;
+
+  BackrefManager &backref_manager;
+
+  struct {
+    /**
+     * used_bytes
+     *
+     * Bytes occupied by live extents
+     */
+    uint64_t used_bytes = 0;
+
+    /**
+     * projected_used_bytes
+     *
+     * Sum of projected bytes used by each transaction between throttle
+     * acquisition and commit completion. See reserve_projected_usage()
+     */
+    uint64_t projected_used_bytes = 0;
+  } stats;
+
+  ExtentCallbackInterface *extent_callback = nullptr;
+  BackgroundListener *background_callback = nullptr;
+};
}
*backref_manager, trimmer_config,
journal_type, roll_start, roll_size);
- auto segment_cleaner = SegmentCleaner::create(
- cleaner_config,
- std::move(sms),
- *backref_manager,
- cleaner_is_detailed);
+ AsyncCleanerRef cleaner;
if (journal_type == journal_type_t::SEGMENTED) {
+ cleaner = SegmentCleaner::create(
+ cleaner_config,
+ std::move(sms),
+ *backref_manager,
+ cleaner_is_detailed);
+ auto segment_cleaner = static_cast<SegmentCleaner*>(cleaner.get());
cache->set_segment_provider(*segment_cleaner);
segment_cleaner->set_journal_trimmer(*journal_trimmer);
+ } else {
+ cleaner = RBMCleaner::create(
+ *backref_manager,
+ cleaner_is_detailed);
}
JournalRef journal;
if (journal_type == journal_type_t::SEGMENTED) {
+ auto segment_cleaner = static_cast<SegmentCleaner*>(cleaner.get());
journal = journal::make_segmented(
*segment_cleaner,
*journal_trimmer);
"");
}
- epm->init(std::move(journal_trimmer), std::move(segment_cleaner));
+ epm->init(std::move(journal_trimmer), std::move(cleaner));
epm->set_primary_device(primary_device);
return std::make_unique<TransactionManager>(