We must keep writeback ops in order not only within a single call to
process_writeback_dirty_entries(), but also across successive calls to
process_writeback_dirty_entries().
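
To make the intent of the new counter concrete, here is a minimal sketch of
the gating pattern, not the real AbstractWriteLog code: entries picked for
writeback bump m_flush_ops_will_send, the selection loop refuses to start a
new pass while the counter is nonzero, and the counter is dropped only once
the writeback for an entry has actually been sent. The names
process_writeback_dirty_entries, m_dirty_log_entries and
m_flush_ops_will_send come from the patch; FlushGate, send_writeback and
op_sent are hypothetical stand-ins.

    #include <atomic>
    #include <deque>
    #include <functional>
    #include <utility>

    struct FlushGate {
      std::atomic<int> m_flush_ops_will_send{0};              // name from the patch
      std::deque<std::function<void()>> m_dirty_log_entries;  // hypothetical queue

      // Select dirty entries for writeback in order.
      void process_writeback_dirty_entries() {
        while (!m_dirty_log_entries.empty()) {
          if (m_flush_ops_will_send) {
            // Ops picked by an earlier pass have not been sent yet; a newer
            // entry must not overtake them, so stop and retry later.
            break;
          }
          auto send_writeback = std::move(m_dirty_log_entries.front());
          m_dirty_log_entries.pop_front();
          m_flush_ops_will_send += 1;  // selected, but not yet handed to writeback
          send_writeback();            // asynchronously ends in op_sent()
        }
      }

      // Called right after the writeback for one entry has actually been issued.
      void op_sent() { m_flush_ops_will_send -= 1; }
    };
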
Signed-off-by: Jianpeng Ma <jianpeng.ma@intel.com>
(cherry picked from commit 76f4d29d92be3f9f45767cb1ac6cc50da528ecec)
m_lowest_flushing_sync_gen = log_entry->ram_entry.sync_gen_number;
}
m_flush_ops_in_flight += 1;
+ m_flush_ops_will_send += 1;
/* For write same this is the bytes affected by the flush op, not the bytes transferred */
m_flush_bytes_in_flight += log_entry->ram_entry.write_bytes;
all_clean = !m_flush_ops_in_flight;
break;
}
+
+ if (m_flush_ops_will_send) {
+ ldout(cct, 20) << "Previous flush ops have not been sent yet" << dendl;
+ break;
+ }
auto candidate = m_dirty_log_entries.front();
bool flushable = can_flush_entry(candidate);
if (flushable) {
std::atomic<int> m_async_flush_ops = {0};
std::atomic<int> m_async_append_ops = {0};
+ std::atomic<int> m_flush_ops_will_send = {0};
/* Acquire locks in order declared here */
mutable ceph::mutex m_log_retire_lock;
ldout(m_image_ctx.cct, 15) << "flushing:" << log_entry
<< " " << *log_entry << dendl;
log_entry->writeback(this->m_image_writeback, ctx);
+ this->m_flush_ops_will_send -= 1;
}), 0);
});
}
ldout(m_image_ctx.cct, 15) << "flushing:" << log_entry
<< " " << *log_entry << dendl;
log_entry->writeback(this->m_image_writeback, ctx);
+ this->m_flush_ops_will_send -= 1;
}), 0);
});
}
<< " " << *log_entry << dendl;
log_entry->writeback_bl(this->m_image_writeback, ctx,
std::move(captured_entry_bl));
+ this->m_flush_ops_will_send -= 1;
}), 0);
} else {
m_image_ctx.op_work_queue->queue(new LambdaContext(
ldout(m_image_ctx.cct, 15) << "flushing:" << log_entry
<< " " << *log_entry << dendl;
log_entry->writeback(this->m_image_writeback, ctx);
+ this->m_flush_ops_will_send -= 1;
}), 0);
}
}