librbd: AIO work queue needs to support dynamic feature updates
author     Jason Dillaman <dillaman@redhat.com>
           Fri, 8 Apr 2016 18:27:46 +0000 (14:27 -0400)
committer  Jason Dillaman <dillaman@redhat.com>
           Sat, 9 Apr 2016 16:59:00 +0000 (12:59 -0400)
If the exclusive lock is enabled/disabled during an IO-driven
image refresh, we need to avoid deadlocking the queue.

Signed-off-by: Jason Dillaman <dillaman@redhat.com>
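
For illustration, here is a minimal standalone sketch (not librbd code; the
MiniRequestWQ and Request names are hypothetical) of the stall-and-requeue
pattern this commit introduces: the dequeue path returns nullptr while a
refresh is in progress, and the refresh handler requeues the stalled request
at the front of the queue so the original IO order is preserved.

#include <deque>
#include <iostream>

// hypothetical simplified request type
struct Request {
  int id;
};

// hypothetical stand-in for the PointerWQ-based AioImageRequestWQ
class MiniRequestWQ {
public:
  void queue(Request *req) {
    m_queue.push_back(req);
  }

  // analogue of _void_dequeue(): return nullptr (no work available)
  // while a refresh is stalling IO; otherwise pop the front item,
  // unless that item must first wait for an image refresh
  Request *dequeue(bool refresh_required) {
    if (m_queue.empty() || m_refresh_in_progress) {
      return nullptr;
    }
    Request *item = m_queue.front();
    m_queue.pop_front();
    if (refresh_required) {
      m_refresh_in_progress = true;  // stall all IO behind this op
      // the real code starts an async refresh that completes via
      // C_RefreshFinish; the sketch completes it inline
      handle_refreshed(0, item);
      return nullptr;
    }
    return item;
  }

  // analogue of handle_refreshed(): requeue at the front so the
  // original submission order is preserved, then lift the stall
  void handle_refreshed(int r, Request *req) {
    if (r < 0) {
      std::cout << "req " << req->id << " failed: " << r << std::endl;
      delete req;
    } else {
      m_queue.push_front(req);
    }
    m_refresh_in_progress = false;
  }

private:
  std::deque<Request *> m_queue;
  bool m_refresh_in_progress = false;
};

int main() {
  MiniRequestWQ wq;
  wq.queue(new Request{1});
  wq.queue(new Request{2});

  // the first dequeue detects a required refresh: IO stalls
  if (wq.dequeue(true) == nullptr) {
    std::cout << "IO stalled for refresh" << std::endl;
  }
  // once the refresh completes, IO resumes in the original order
  while (Request *req = wq.dequeue(false)) {
    std::cout << "processing req " << req->id << std::endl;
    delete req;
  }
  return 0;
}

The real work queue additionally holds the thread pool lock around the
refresh transition and keeps exclusive-lock state in mind; the sketch only
shows the ordering argument.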
src/librbd/AioImageRequestWQ.cc
src/librbd/AioImageRequestWQ.h

diff --git a/src/librbd/AioImageRequestWQ.cc b/src/librbd/AioImageRequestWQ.cc
index 352b7e9f38db836e560f3ef4d31ec414611f500c..97c6fa4cda0b13e8516b485f366db5b9e72a8c5a 100644
--- a/src/librbd/AioImageRequestWQ.cc
+++ b/src/librbd/AioImageRequestWQ.cc
@@ -302,17 +302,24 @@ void AioImageRequestWQ::clear_require_lock_on_read() {
 
 void *AioImageRequestWQ::_void_dequeue() {
   AioImageRequest<> *peek_item = front();
-  if (peek_item == NULL || m_refresh_in_progress) {
-    return NULL;
+
+  // no IO ops available or refresh in-progress (IO stalled)
+  if (peek_item == nullptr || m_refresh_in_progress) {
+    return nullptr;
   }
 
+  bool refresh_required = m_image_ctx.state->is_refresh_required();
   {
     RWLock::RLocker locker(m_lock);
     if (peek_item->is_write_op()) {
       if (m_write_blockers > 0) {
-        return NULL;
+        return nullptr;
+      }
+
+      // refresh will requeue the op -- don't count it as in-progress
+      if (!refresh_required) {
+        m_in_progress_writes.inc();
       }
-      m_in_progress_writes.inc();
     } else if (m_require_lock_on_read) {
       return nullptr;
     }
@@ -322,15 +329,17 @@ void *AioImageRequestWQ::_void_dequeue() {
     ThreadPool::PointerWQ<AioImageRequest<> >::_void_dequeue());
   assert(peek_item == item);
 
-  if (m_image_ctx.state->is_refresh_required()) {
+  if (refresh_required) {
     ldout(m_image_ctx.cct, 15) << "image refresh required: delaying IO " << item
                                << dendl;
+
+    // stall IO until the refresh completes
     m_refresh_in_progress = true;
 
     get_pool_lock().Unlock();
     m_image_ctx.state->refresh(new C_RefreshFinish(this, item));
     get_pool_lock().Lock();
-    return NULL;
+    return nullptr;
   }
   return item;
 }
@@ -345,21 +354,34 @@ void AioImageRequestWQ::process(AioImageRequest<> *req) {
     req->send();
   }
 
+  finish_queued_op(req);
+  if (req->is_write_op()) {
+    finish_in_progress_write();
+  }
+  delete req;
+
+  finish_in_flight_op();
+}
+
+void AioImageRequestWQ::finish_queued_op(AioImageRequest<> *req) {
+  RWLock::RLocker locker(m_lock);
+  if (req->is_write_op()) {
+    assert(m_queued_writes.read() > 0);
+    m_queued_writes.dec();
+  } else {
+    assert(m_queued_reads.read() > 0);
+    m_queued_reads.dec();
+  }
+}
+
+void AioImageRequestWQ::finish_in_progress_write() {
   bool writes_blocked = false;
   {
     RWLock::RLocker locker(m_lock);
-    if (req->is_write_op()) {
-      assert(m_queued_writes.read() > 0);
-      m_queued_writes.dec();
-
-      assert(m_in_progress_writes.read() > 0);
-      if (m_in_progress_writes.dec() == 0 &&
-          !m_write_blocker_contexts.empty()) {
-        writes_blocked = true;
-      }
-    } else {
-      assert(m_queued_reads.read() > 0);
-      m_queued_reads.dec();
+    assert(m_in_progress_writes.read() > 0);
+    if (m_in_progress_writes.dec() == 0 &&
+        !m_write_blocker_contexts.empty()) {
+      writes_blocked = true;
     }
   }
 
@@ -367,9 +389,6 @@ void AioImageRequestWQ::process(AioImageRequest<> *req) {
     RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
     m_image_ctx.flush(new C_BlockedWrites(this));
   }
-  delete req;
-
-  finish_in_flight_op();
 }
 
 int AioImageRequestWQ::start_in_flight_op(AioCompletion *c) {
@@ -440,12 +459,24 @@ void AioImageRequestWQ::handle_refreshed(int r, AioImageRequest<> *req) {
                  << "req=" << req << dendl;
   if (r < 0) {
     req->fail(r);
+    finish_queued_op(req);
+    delete req;
+
+    finish_in_flight_op();
   } else {
-    process(req);
-    process_finish();
+    // since IO was stalled for refresh -- original IO order is preserved
+    // if we requeue this op for work queue processing
+    requeue(req);
+  }
 
-    m_refresh_in_progress = false;
-    signal();
+  m_refresh_in_progress = false;
+  signal();
+
+  // refresh might have enabled exclusive lock -- IO stalled until
+  // we acquire the lock
+  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
+  if (is_lock_required() && is_lock_request_needed()) {
+    m_image_ctx.exclusive_lock->request_lock(nullptr);
   }
 }
 
diff --git a/src/librbd/AioImageRequestWQ.h b/src/librbd/AioImageRequestWQ.h
index fa429fc7e9e08f2e9598db44adb5793f2f8db54a..e21aa860f67bc23de138e0db69b26c0192fa2425 100644
--- a/src/librbd/AioImageRequestWQ.h
+++ b/src/librbd/AioImageRequestWQ.h
@@ -104,6 +104,9 @@ private:
     return (m_queued_writes.read() == 0);
   }
 
+  void finish_queued_op(AioImageRequest<ImageCtx> *req);
+  void finish_in_progress_write();
+
   int start_in_flight_op(AioCompletion *c);
   void finish_in_flight_op();
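
C_RefreshFinish, constructed in _void_dequeue() above, is not defined in this
diff. Assuming the usual librbd callback pattern, it is plausibly a Context
that forwards the refresh result and the stalled request back to
handle_refreshed(); the sketch below is an illustration, not the committed
definition.

// hypothetical reconstruction -- the committed definition is not shown here
struct AioImageRequestWQ::C_RefreshFinish : public Context {
  AioImageRequestWQ *aio_work_queue;
  AioImageRequest<> *aio_image_request;

  C_RefreshFinish(AioImageRequestWQ *aio_work_queue,
                  AioImageRequest<> *aio_image_request)
    : aio_work_queue(aio_work_queue), aio_image_request(aio_image_request) {
  }

  virtual void finish(int r) override {
    // on refresh completion, hand the stalled request back to the work
    // queue: r < 0 fails the op, r == 0 requeues it for processing
    aio_work_queue->handle_refreshed(r, aio_image_request);
  }
};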