RecordSubmitter::roll_segment()
{
LOG_PREFIX(RecordSubmitter::roll_segment);
ceph_assert(p_current_batch->needs_flush() ||
            is_available());
// #1 block concurrent submissions due to rolling
wait_available_promise = seastar::shared_promise<>();
ceph_assert(!wait_unfull_flush_promise.has_value());
return [FNAME, this] {
if (p_current_batch->is_pending()) {
if (state == state_t::FULL) {
bool with_atomic_roll_segment)
{
LOG_PREFIX(RecordSubmitter::submit);
ceph_assert(is_available());
assert(check_action(record.size) != action_t::ROLL);
segment_allocator.get_provider().update_modify_time(
segment_allocator.get_segment_id(),
assert(p_current_batch->is_pending());
} else {
wait_available_promise = seastar::shared_promise<>();
ceph_assert(!wait_unfull_flush_promise.has_value());
wait_unfull_flush_promise = seastar::promise<>();
// flush and mark available in background
std::ignore = wait_unfull_flush_promise->get_future(
RecordSubmitter::close_ertr::future<>
RecordSubmitter::close()
{
  // Precondition: the submitter must be fully quiesced before the
  // underlying segment allocator is closed -- state machine idle, no
  // in-flight writes, the current batch allocated but empty, and no
  // waiters parked on the availability/flush promises.
  // ceph_assert() (rather than assert()) so these invariants are
  // enforced in release builds as well as debug builds.
  ceph_assert(state == state_t::IDLE);
  ceph_assert(num_outstanding_io == 0);
  committed_to = JOURNAL_SEQ_NULL;
  ceph_assert(p_current_batch != nullptr);
  ceph_assert(p_current_batch->is_empty());
  ceph_assert(!wait_available_promise.has_value());
  has_io_error = false;
  ceph_assert(!wait_unfull_flush_promise.has_value());
  metrics.clear();
  // Delegate the actual device/segment teardown to the allocator.
  return segment_allocator.close();
}
return;
}
} else {
ceph_assert(!wait_unfull_flush_promise.has_value());
}
auto needs_flush = (