CephContext *cct = m_image_ctx.cct;
bool wake_up = false;
+ Contexts waiter_contexts;
{
RWLock::WLocker locker(m_lock);
assert(m_write_blockers > 0);
<< m_write_blockers << dendl;
if (m_write_blockers == 0) {
wake_up = true;
+ std::swap(waiter_contexts, m_unblocked_write_waiter_contexts);
}
}
if (wake_up) {
+ for (auto ctx : waiter_contexts) {
+ ctx->complete(0);
+ }
this->signal();
}
}
+// Wait until writes are no longer blocked, then complete |on_unblocked|
+// with result 0.  If write blockers are active (m_write_blockers > 0),
+// or earlier waiters are already queued (keeps completion in FIFO order
+// relative to them), the context is appended to
+// m_unblocked_write_waiter_contexts and completed later when the last
+// blocker is released; otherwise it is completed inline.
+//
+// Precondition: caller holds m_image_ctx.owner_lock (asserted below).
+template <typename I>
+void ImageRequestWQ<I>::wait_on_writes_unblocked(Context *on_unblocked) {
+ assert(m_image_ctx.owner_lock.is_locked());
+ CephContext *cct = m_image_ctx.cct;
+
+ {
+ // m_lock guards m_write_blockers and the waiter list.
+ RWLock::WLocker locker(m_lock);
+ ldout(cct, 20) << &m_image_ctx << ", " << "write_blockers="
+ << m_write_blockers << dendl;
+ if (!m_unblocked_write_waiter_contexts.empty() || m_write_blockers > 0) {
+ m_unblocked_write_waiter_contexts.push_back(on_unblocked);
+ return;
+ }
+ }
+
+ // No blockers and no queued waiters: complete immediately, outside
+ // m_lock so an arbitrary callback is never invoked under the lock.
+ on_unblocked->complete(0);
+}
+
template <typename I>
void ImageRequestWQ<I>::set_require_lock(Direction direction, bool enabled) {
CephContext *cct = m_image_ctx.cct;
void block_writes(Context *on_blocked);
void unblock_writes();
+ // Completes |on_unblocked| with 0 once writes are unblocked; queued
+ // behind earlier waiters while write blockers are active.
+ void wait_on_writes_unblocked(Context *on_unblocked);
+
void set_require_lock(Direction direction, bool enabled);
void apply_qos_limit(uint64_t limit, const uint64_t flag);
mutable RWLock m_lock;
Contexts m_write_blocker_contexts;
uint32_t m_write_blockers = 0;
+ // Contexts waiting for m_write_blockers to reach zero; swapped out and
+ // completed (result 0) when the last write blocker is released.
+ Contexts m_unblocked_write_waiter_contexts;
bool m_require_lock_on_read = false;
bool m_require_lock_on_write = false;
std::atomic<unsigned> m_queued_reads { 0 };