}
ceph_assert(!allocated);
if (!allocated && front_req) {
- /* front_req->alloc_resources() failed on the last iteration. We'll stop dispatching. */
+ /* front_req->alloc_resources() failed on the last iteration.
+ * We'll stop dispatching. */
+ wake_up();
front_req = nullptr;
ceph_assert(!cleared_dispatching_flag);
m_dispatching_deferred_ops = false;
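The hunk above closes a stall in the deferred-dispatch path: when front_req->alloc_resources() fails, the code clears m_dispatching_deferred_ops and returns, so without the added wake_up() nothing would schedule another dispatch pass once resources are freed. Below is a minimal sketch of that stall (toy names, not the Ceph API, and not part of the patch):

    #include <deque>
    #include <functional>
    #include <iostream>

    // Toy dispatcher: if alloc_resources() fails and nothing re-arms the
    // work loop, queued requests wait forever. wake_up() is the re-arm hook
    // corresponding to the call added in the hunk above.
    struct ToyDispatcher {
      std::deque<int> deferred;               // queued deferred requests
      std::function<void()> wake_up;          // schedules another dispatch pass
      std::function<bool()> alloc_resources;  // true when resources were granted

      void dispatch_deferred() {
        while (!deferred.empty()) {
          if (!alloc_resources()) {
            wake_up();  // retry later instead of silently stalling
            return;
          }
          std::cout << "dispatched " << deferred.front() << "\n";
          deferred.pop_front();
        }
      }
    };

    int main() {
      ToyDispatcher d;
      d.deferred = {1, 2, 3};
      bool granted = false;
      d.alloc_resources = [&] { return granted; };
      d.wake_up = [&] { granted = true; };  // stand-in for retiring entries
      d.dispatch_deferred();  // allocation fails, wake_up() re-arms
      d.dispatch_deferred();  // second pass drains the queue
    }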
@@ ... @@
/* Don't attempt buffer allocate if we've exceeded the "full" threshold */
if (m_bytes_allocated + bytes_allocated > bytes_allocated_cap) {
if (!req->has_io_waited_for_buffers()) {
- req->set_io_waited_for_entries(true);
+ req->set_io_waited_for_buffers(true);
ldout(m_image_ctx.cct, 1) << "Waiting for allocation cap (cap="
<< bytes_allocated_cap
<< ", allocated=" << m_bytes_allocated
@@ ... @@
m_bytes_allocated += bytes_allocated;
m_bytes_cached += bytes_cached;
m_bytes_dirty += bytes_dirtied;
+ if (req->has_io_waited_for_buffers()) {
+ req->set_io_waited_for_buffers(false);
+ }
+
} else {
alloc_succeeds = false;
}
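The two hunks above are halves of one fix in the resource-allocation path: on the cap-exceeded branch the request must raise its buffer-wait flag (the guard already checks has_io_waited_for_buffers(), but the old code set the entry-wait flag), and on the success branch the same flag is lowered again. That keeps the "Waiting for allocation cap" message to one line per wait. A self-contained sketch of the set/clear symmetry (invented names and cap, not the Ceph API):

    #include <cstdint>
    #include <iostream>

    struct ToyReq {
      bool waited_for_buffers = false;
    };

    struct ToyCache {
      uint64_t bytes_allocated = 0;
      uint64_t cap = 1024;

      bool alloc(ToyReq &req, uint64_t bytes) {
        if (bytes_allocated + bytes > cap) {
          if (!req.waited_for_buffers) {   // log the wait only once
            req.waited_for_buffers = true;
            std::cout << "waiting for allocation cap\n";
          }
          return false;                    // caller retires entries and retries
        }
        bytes_allocated += bytes;
        if (req.waited_for_buffers) {      // success: lower the flag again
          req.waited_for_buffers = false;
        }
        return true;
      }
    };

    int main() {
      ToyCache cache;
      ToyReq req;
      while (!cache.alloc(req, 2048)) {    // over cap: logs exactly once
        cache.cap += 1024;                 // stand-in for retiring entries
      }
    }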
@@ ... @@
(utime_t(ceph_clock_now() - started).to_msec() < RETIRE_BATCH_TIME_LIMIT_MS))) {
if (!retire_entries((this->m_shutting_down || this->m_invalidating ||
(this->m_bytes_allocated > aggressive_high_water_bytes) ||
- (m_log_entries.size() > aggressive_high_water_entries))
+ (m_log_entries.size() > aggressive_high_water_entries) ||
+ this->m_alloc_failed_since_retire)
? MAX_ALLOC_PER_TRANSACTION
: MAX_FREE_PER_TRANSACTION)) {
break;
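This hunk widens the "aggressive retire" predicate: a failed allocation since the last retire now also selects the larger MAX_ALLOC_PER_TRANSACTION batch, so the retire loop frees space faster exactly when writes are already blocked on it. The selection logic, restated as a standalone function (the constants are invented; only the shape of the predicate is taken from the hunk):

    #include <cstddef>
    #include <cstdint>

    constexpr unsigned MAX_ALLOC_PER_TRANSACTION = 8;  // aggressive batch size
    constexpr unsigned MAX_FREE_PER_TRANSACTION = 1;   // steady-state batch size

    unsigned retire_batch(bool shutting_down, bool invalidating,
                          bool alloc_failed_since_retire,
                          uint64_t bytes_allocated,
                          uint64_t aggressive_high_water_bytes,
                          size_t log_entries,
                          size_t aggressive_high_water_entries) {
      bool aggressive = shutting_down || invalidating ||
                        bytes_allocated > aggressive_high_water_bytes ||
                        log_entries > aggressive_high_water_entries ||
                        alloc_failed_since_retire;  // the newly added condition
      return aggressive ? MAX_ALLOC_PER_TRANSACTION : MAX_FREE_PER_TRANSACTION;
    }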
@@ ... @@
buffer.allocation_lat = ceph_clock_now() - before_reserve;
if (TOID_IS_NULL(buffer.buffer_oid)) {
if (!req->has_io_waited_for_buffers()) {
- req->set_io_waited_for_entries(true);
+ req->set_io_waited_for_buffers(true);
}
ldout(m_image_ctx.cct, 5) << "can't allocate all data buffers: "
<< pmemobj_errormsg() << ". "
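The final hunk applies the same flag correction on the pmem buffer-reservation failure path: a null buffer_oid means data buffers, not log entries, are what the request is waiting for. In both corrected sites the removed line called set_io_waited_for_entries() under a has_io_waited_for_buffers() guard, so the entry-wait flag could be set repeatedly while the buffer-wait flag never changed; pairing the guard with set_io_waited_for_buffers() makes the check and the update operate on the same flag.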