journal_seq_t alloc_replay_from)
{
LOG_PREFIX(SegmentCleaner::update_journal_tail_target);
- if (dirty_replay_from.offset.get_addr_type() == addr_types_t::RANDOM_BLOCK) {
- return;
- }
+ // Test-only short-circuit: with trimming disabled there is no tail to track.
+ if (disable_trim) return;
+ // The removed early-return silently tolerated RANDOM_BLOCK offsets; the
+ // patch treats them as caller bugs via asserts instead.
+ assert(dirty_replay_from.offset.get_addr_type() != addr_types_t::RANDOM_BLOCK);
+ assert(alloc_replay_from.offset.get_addr_type() != addr_types_t::RANDOM_BLOCK);
if (dirty_extents_replay_from == JOURNAL_SEQ_NULL
|| dirty_replay_from > dirty_extents_replay_from) {
DEBUG("dirty_extents_replay_from={} => {}",
void SegmentCleaner::update_journal_tail_committed(journal_seq_t committed)
{
LOG_PREFIX(SegmentCleaner::update_journal_tail_committed);
+ // A committed journal tail must never point into RANDOM_BLOCK storage.
+ assert(committed.offset.get_addr_type() != addr_types_t::RANDOM_BLOCK);
if (committed == JOURNAL_SEQ_NULL) {
return;
}
void SegmentCleaner::complete_init()
{
LOG_PREFIX(SegmentCleaner::complete_init);
+ // Test mode: mark init done but skip GC startup (and the journal-head
+ // assertion below, which need not hold when trimming is disabled).
+ if (disable_trim) {
+ init_complete = true;
+ return;
+ }
INFO("done, start GC");
ceph_assert(segments.get_journal_head() != JOURNAL_SEQ_NULL);
init_complete = true;
SegmentSeqAllocatorRef ool_segment_seq_allocator;
+ // When true, trim/reclaim/GC gating is bypassed throughout the cleaner.
+ bool disable_trim = false; // for test
public:
SegmentCleaner(
config_t config,
void init_mkfs() {
auto journal_head = segments.get_journal_head();
- ceph_assert(journal_head != JOURNAL_SEQ_NULL);
+ // With trim disabled (tests), a NULL journal head is tolerated here.
+ ceph_assert(disable_trim || journal_head != JOURNAL_SEQ_NULL);
journal_tail_target = journal_head;
journal_tail_committed = journal_head;
}
return space_tracker->equals(tracker);
}
+ // Test hook: toggle the disable_trim flag at runtime.
+ void set_disable_trim(bool val){
+ disable_trim = val;
+ }
+
using work_ertr = ExtentCallbackInterface::extent_mapping_ertr;
using work_iertr = ExtentCallbackInterface::extent_mapping_iertr;
* Encapsulates whether block pending gc.
*/
bool should_block_on_trim() const {
+ // Never block writers on trim progress when trimming is disabled.
+ if (disable_trim) return false;
return get_dirty_tail_limit() > journal_tail_target;
}
bool should_block_on_reclaim() const {
+ // Reclaim can never block writers when trimming/reclaim is disabled.
+ if (disable_trim) return false;
if (get_segments_reclaimable() == 0) {
return false;
}
void log_gc_state(const char *caller) const {
auto &logger = crimson::get_logger(ceph_subsys_seastore_cleaner);
- if (logger.is_enabled(seastar::log_level::debug)) {
+ // Also suppress the GC-state debug dump when trimming is disabled.
+ if (logger.is_enabled(seastar::log_level::debug) &&
+ !disable_trim) {
logger.debug(
"SegmentCleaner::log_gc_state({}): "
"empty {}, "
public:
seastar::future<> reserve_projected_usage(size_t projected_usage) {
+ // Test mode: skip space accounting and resolve immediately.
+ if (disable_trim) {
+ return seastar::now();
+ }
ceph_assert(init_complete);
// The pipeline configuration prevents another IO from entering
// prepare until the prior one exits and clears this.
}
void release_projected_usage(size_t projected_usage) {
+ // Matching no-op for the test-mode reserve path.
+ if (disable_trim) return;
ceph_assert(init_complete);
ceph_assert(stats.projected_used_bytes >= projected_usage);
stats.projected_used_bytes -= projected_usage;
* Encapsulates logic for whether gc should be reclaiming segment space.
*/
bool gc_should_reclaim_space() const {
+ // No space reclamation in test mode.
+ if (disable_trim) return false;
if (get_segments_reclaimable() == 0) {
return false;
}
* True if gc should be running.
*/
bool gc_should_run() const {
+ // GC never runs when trimming is disabled for tests (this also skips
+ // the init_complete assertion below).
+ if (disable_trim) return false;
ceph_assert(init_complete);
return gc_should_reclaim_space() || gc_should_trim_journal();
}