config(config),
journal_type(type),
roll_start(roll_start),
- roll_size(roll_size)
+ roll_size(roll_size),
+ reserved_usage(0)
{
config.validate();
ceph_assert(roll_start >= 0);
}
}
-void SegmentCleaner::reserve_projected_usage(std::size_t projected_usage)
+bool SegmentCleaner::try_reserve_projected_usage(std::size_t projected_usage)
{
assert(background_callback->is_ready());
stats.projected_used_bytes += projected_usage;
- ++stats.projected_count;
- stats.projected_used_bytes_sum += stats.projected_used_bytes;
+ if (should_block_io_on_clean()) {
+ stats.projected_used_bytes -= projected_usage;
+ return false;
+ } else {
+ ++stats.projected_count;
+ stats.projected_used_bytes_sum += stats.projected_used_bytes;
+ return true;
+ }
}
void SegmentCleaner::release_projected_usage(std::size_t projected_usage)
}
}
-void RBMCleaner::reserve_projected_usage(std::size_t projected_usage)
+bool RBMCleaner::try_reserve_projected_usage(std::size_t projected_usage)
{
assert(background_callback->is_ready());
stats.projected_used_bytes += projected_usage;
+ return true;
}
void RBMCleaner::release_projected_usage(std::size_t projected_usage)
virtual void update_journal_tails(
journal_seq_t dirty_tail, journal_seq_t alloc_tail) = 0;
+ // Try to reserve the projected usage in the journal.
+ // Returns whether the reservation succeeded.
+ // If the reservation succeeds, the caller must later call
+ // release_inline_usage to release it.
+ virtual bool try_reserve_inline_usage(std::size_t usage) = 0;
+
+ // Release usage previously reserved by try_reserve_inline_usage.
+ virtual void release_inline_usage(std::size_t usage) = 0;
+
virtual ~JournalTrimmer() {}
journal_seq_t get_journal_tail() const {
}
bool should_block_io_on_trim() const {
- return get_tail_limit() > get_journal_tail();
+ return get_tail_limit() >
+ get_journal_tail().add_offset(
+ journal_type, reserved_usage, roll_start, roll_size);
+ }
+
+ bool try_reserve_inline_usage(std::size_t usage) final {
+ reserved_usage += usage;
+ if (should_block_io_on_trim()) {
+ reserved_usage -= usage;
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ void release_inline_usage(std::size_t usage) final {
+ ceph_assert(reserved_usage >= usage);
+ reserved_usage -= usage;
}
using trim_ertr = crimson::errorator<
journal_seq_t journal_dirty_tail;
journal_seq_t journal_alloc_tail;
+ std::size_t reserved_usage;
+
seastar::metrics::metric_group metrics;
};
}
std::unique_ptr<RBMSpaceTracker> make_empty() const {
- auto ret = std::make_unique<RBMSpaceTracker>(*this);
+ auto ret = std::make_unique<RBMSpaceTracker>(*this);
ret->reset();
return ret;
}
virtual void commit_space_used(paddr_t, extent_len_t) = 0;
- virtual void reserve_projected_usage(std::size_t) = 0;
+ // Try to reserve the projected usage in the cleaner.
+ // Returns whether the reservation succeeded.
+ // If the reservation succeeds, the caller must later call
+ // release_projected_usage to release it.
+ virtual bool try_reserve_projected_usage(std::size_t) = 0;
virtual void release_projected_usage(std::size_t) = 0;
mark_space_used(addr, len);
}
- void reserve_projected_usage(std::size_t) final;
+ bool try_reserve_projected_usage(std::size_t) final;
void release_projected_usage(size_t) final;
* projected_used_bytes
*
* Sum of projected bytes used by each transaction between throttle
- * acquisition and commit completion. See reserve_projected_usage()
+ * acquisition and commit completion. See try_reserve_projected_usage()
*/
uint64_t projected_used_bytes = 0;
uint64_t projected_count = 0;
void commit_space_used(paddr_t, extent_len_t) final;
- void reserve_projected_usage(std::size_t) final;
+ bool try_reserve_projected_usage(std::size_t) final;
void release_projected_usage(size_t) final;
);
}
+ExtentPlacementManager::BackgroundProcess::reserve_result_t
+ExtentPlacementManager::BackgroundProcess::try_reserve(
+ const projected_usage_t &usage)
+{
+ reserve_result_t res {
+ trimmer->try_reserve_inline_usage(usage.inline_usage),
+ cleaner->try_reserve_projected_usage(usage.inline_usage + usage.ool_usage)
+ };
+
+ if (!res.is_successful()) {
+ if (res.reserve_inline_success) {
+ trimmer->release_inline_usage(usage.inline_usage);
+ }
+ if (res.reserve_ool_success) {
+ cleaner->release_projected_usage(usage.inline_usage + usage.ool_usage);
+ }
+ }
+ return res;
+}
+
seastar::future<>
ExtentPlacementManager::BackgroundProcess::reserve_projected_usage(
projected_usage_t usage)
{
- auto projected_usage = usage.inline_usage + usage.ool_usage;
ceph_assert(is_ready());
ceph_assert(!blocking_io);
// The pipeline configuration prevents another IO from entering
// prepare until the prior one exits and clears this.
++stats.io_count;
- bool is_blocked = false;
- if (trimmer->should_block_io_on_trim()) {
- is_blocked = true;
- ++stats.io_blocked_count_trim;
- }
- if (cleaner->should_block_io_on_clean()) {
- is_blocked = true;
- ++stats.io_blocked_count_clean;
- }
- if (is_blocked) {
+
+ auto res = try_reserve(usage);
+ if (res.is_successful()) {
+ return seastar::now();
+ } else {
+ if (!res.reserve_inline_success) {
+ ++stats.io_blocked_count_trim;
+ }
+ if (!res.reserve_ool_success) {
+ ++stats.io_blocked_count_clean;
+ }
++stats.io_blocking_num;
++stats.io_blocked_count;
stats.io_blocked_sum += stats.io_blocking_num;
- }
- return seastar::do_until(
- [this] {
- log_state("reserve_projected_usage(await_hard_limits)");
- return !should_block_io();
- },
- [this] {
+
+ return seastar::repeat([this, usage] {
blocking_io = seastar::promise<>();
- return blocking_io->get_future();
- }
- ).then([this, is_blocked, projected_usage] {
- ceph_assert(!blocking_io);
- if (is_blocked) {
- assert(stats.io_blocking_num > 0);
- --stats.io_blocking_num;
- }
- cleaner->reserve_projected_usage(projected_usage);
- });
+ return blocking_io->get_future(
+ ).then([this, usage] {
+ ceph_assert(!blocking_io);
+ auto res = try_reserve(usage);
+ if (res.is_successful()) {
+ assert(stats.io_blocking_num > 0);
+ --stats.io_blocking_num;
+ return seastar::make_ready_future<seastar::stop_iteration>(
+ seastar::stop_iteration::yes);
+ } else {
+ return seastar::make_ready_future<seastar::stop_iteration>(
+ seastar::stop_iteration::no);
+ }
+ });
+ });
+ }
}
seastar::future<>
void release_projected_usage(projected_usage_t usage) {
ceph_assert(is_ready());
+ trimmer->release_inline_usage(usage.inline_usage);
cleaner->release_projected_usage(usage.inline_usage + usage.ool_usage);
}
cleaner->should_block_io_on_clean();
}
+ struct reserve_result_t {
+ bool reserve_inline_success = true;
+ bool reserve_ool_success = true;
+
+ bool is_successful() const {
+ return reserve_inline_success && reserve_ool_success;
+ }
+ };
+
+ reserve_result_t try_reserve(const projected_usage_t &usage);
+
seastar::future<> do_background_cycle();
void register_metrics();
void update_journal_tails(journal_seq_t, journal_seq_t) final {}
+ bool try_reserve_inline_usage(std::size_t) final { return true; }
+
+ void release_inline_usage(std::size_t) final {}
+
/*
* SegmentProvider interfaces
*/
return seastar::now();
}
+ bool try_reserve_inline_usage(std::size_t) final { return true; }
+
+ void release_inline_usage(std::size_t) final {}
+
auto submit_record(record_t&& record) {
entries.push_back(record);
OrderingHandle handle = get_dummy_ordering_handle();
void update_journal_tails(journal_seq_t, journal_seq_t) final {}
+ bool try_reserve_inline_usage(std::size_t) final { return true; }
+
+ void release_inline_usage(std::size_t) final {}
+
/*
* SegmentProvider interfaces
*/