{
ceph_assert(max_journal_bytes <= DEVICE_OFF_MAX);
ceph_assert(max_journal_bytes > target_journal_dirty_bytes);
- ceph_assert(target_journal_dirty_bytes >= min_journal_bytes);
+ ceph_assert(target_journal_dirty_bytes >= min_journal_dirty_bytes);
ceph_assert(target_journal_dirty_bytes > rewrite_dirty_bytes_per_cycle);
ceph_assert(max_journal_bytes > target_journal_alloc_bytes);
ceph_assert(rewrite_dirty_bytes_per_trans > 0);
assert(roll_size);
std::size_t target_dirty_bytes = 0;
std::size_t target_alloc_bytes = 0;
- std::size_t min_journal_bytes = 0;
+ std::size_t min_dirty_bytes = 0;
std::size_t max_journal_bytes = 0;
if (type == backend_type_t::SEGMENTED) {
- min_journal_bytes = 12 * roll_size;
+ min_dirty_bytes = 12 * roll_size;
target_alloc_bytes = 2 * roll_size;
target_dirty_bytes = 14 * roll_size;
max_journal_bytes = 16 * roll_size;
} else {
assert(type == backend_type_t::RANDOM_BLOCK);
- min_journal_bytes = roll_size / 4;
+ min_dirty_bytes = roll_size / 4;
target_alloc_bytes = roll_size / 4;
target_dirty_bytes = roll_size / 3;
max_journal_bytes = roll_size / 2;
return config_t{
target_dirty_bytes,
target_alloc_bytes,
- min_journal_bytes,
+ min_dirty_bytes,
max_journal_bytes,
1<<26,// rewrite_dirty_bytes_per_cycle, 64MB
1<<17,// rewrite_dirty_bytes_per_trans, 128KB
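For illustration, assuming a hypothetical roll_size of 64MiB, the SEGMENTED defaults above work out to min_journal_dirty_bytes = 768MiB (12x), target_journal_dirty_bytes = 896MiB (14x) and max_journal_bytes = 1GiB (16x), satisfying the ordering checked by the ceph_asserts at the top (min <= target_dirty < max). A minimal sketch of that arithmetic:

    #include <cstddef>
    // Illustration only; 64MiB is an assumed roll_size, not a seastore default.
    constexpr std::size_t roll_size = std::size_t(64) << 20;
    constexpr std::size_t min_dirty = 12 * roll_size;     // 768MiB
    constexpr std::size_t target_dirty = 14 * roll_size;  // 896MiB
    constexpr std::size_t max_bytes = 16 * roll_size;     // 1GiB
    static_assert(min_dirty <= target_dirty && target_dirty < max_bytes);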
assert(roll_size);
std::size_t target_dirty_bytes = 0;
std::size_t target_alloc_bytes = 0;
- std::size_t min_journal_bytes = 0;
+ std::size_t min_dirty_bytes = 0;
std::size_t max_journal_bytes = 0;
if (type == backend_type_t::SEGMENTED) {
- min_journal_bytes = 2 * roll_size;
+ min_dirty_bytes = 2 * roll_size;
target_alloc_bytes = 2 * roll_size;
target_dirty_bytes = 3 * roll_size;
max_journal_bytes = 4 * roll_size;
} else {
assert(type == backend_type_t::RANDOM_BLOCK);
- min_journal_bytes = roll_size / 36;
+ min_dirty_bytes = roll_size / 36;
target_alloc_bytes = roll_size / 4;
target_dirty_bytes = roll_size / 24;
max_journal_bytes = roll_size / 2;
return config_t{
target_dirty_bytes,
target_alloc_bytes,
- min_journal_bytes,
+ min_dirty_bytes,
max_journal_bytes,
(target_dirty_bytes > 1<<26)
? 1<<25
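The hunk below adjusts the dirty-tail computation to the renamed field: the returned sequence is journal_head stepped back by min_journal_dirty_bytes (hence the negated device_off_t cast), so trimming never reclaims the most recent min_journal_dirty_bytes of dirty data, consistent with get_max_dirty_bytes_to_trim() further down.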
assert(background_callback->is_ready());
auto ret = journal_head.add_offset(
backend_type,
- -static_cast<device_off_t>(config.min_journal_bytes),
+ -static_cast<device_off_t>(config.min_journal_dirty_bytes),
roll_start,
roll_size);
return ret;
public:
struct config_t {
/// Minimum number of dirty bytes to start trimming dirty,
- // this must be larger than or equal to min_journal_bytes,
+ // this must be larger than or equal to min_journal_dirty_bytes,
// otherwise trim_dirty may never happen.
std::size_t target_journal_dirty_bytes = 0;
/// Minimum number of bytes to stop trimming allocation
/// (leaving the corresponding backrefs unmerged)
std::size_t target_journal_alloc_bytes = 0;
/// Minimum number of dirty bytes to keep in the journal.
- std::size_t min_journal_bytes = 0;
+ std::size_t min_journal_dirty_bytes = 0;
/// Maximum number of journal bytes before user transactions are blocked.
std::size_t max_journal_bytes = 0;
/// Number of dirty bytes to rewrite per cycle
}
std::size_t get_max_dirty_bytes_to_trim() const {
auto journal_dirty_bytes = get_journal_dirty_bytes();
- if (journal_dirty_bytes <= config.min_journal_bytes) {
+ if (journal_dirty_bytes <= config.min_journal_dirty_bytes) {
return 0;
}
- return journal_dirty_bytes - config.min_journal_bytes;
+ return journal_dirty_bytes - config.min_journal_dirty_bytes;
}
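For example, with the SEGMENTED defaults above (min_journal_dirty_bytes = 12 * roll_size), a journal carrying 15 * roll_size of dirty data can have at most 3 * roll_size trimmed, and once the dirty size is at or below 12 * roll_size this returns 0, so trim_dirty always leaves at least min_journal_dirty_bytes in place.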
std::size_t get_dirty_bytes_to_trim() const {
return std::min(get_max_dirty_bytes_to_trim(),