librbd: delay processing of next journal entry until flush in-progress
author Jason Dillaman <dillaman@redhat.com>
Fri, 6 May 2016 13:47:01 +0000 (09:47 -0400)
committer Jason Dillaman <dillaman@redhat.com>
Sat, 7 May 2016 12:23:18 +0000 (08:23 -0400)
When replaying a journal flush event, do not start processing the next
journal entry until after the flush is in progress to ensure the barrier
is correctly guarding against future writes.

Signed-off-by: Jason Dillaman <dillaman@redhat.com>
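
The key change is when on_ready fires for an AioFlushEvent: the flush barrier is now dispatched before the journal is allowed to hand over the next entry. A minimal sketch of the resulting Replay<I>::handle_event() flow for a flush event, condensed from the hunks below (logging and unrelated handling omitted):

template <typename I>
void Replay<I>::handle_event(const journal::AioFlushEvent &event,
                             Context *on_ready, Context *on_safe) {
  AioCompletion *aio_comp;
  {
    Mutex::Locker locker(m_lock);
    // on_safe fires once the flush (and the unsafe AIO modify ops it
    // covers) has committed; on_ready is no longer passed to the helper
    aio_comp = create_aio_flush_completion(on_safe);
  }

  // issue the flush barrier first ...
  AioImageRequest<I>::aio_flush(&m_image_ctx, aio_comp);

  // ... and only then allow the journal to dispatch the next replay entry,
  // so the barrier is already in progress and guards against future writes
  on_ready->complete(0);
}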
src/librbd/journal/Replay.cc
src/librbd/journal/Replay.h

index aca64d9ba82f1b63441228d19b1cb58ab6258549..efe534c7fcdbf620fba94ac719c4d06240f7fb06 100644 (file)
@@ -166,7 +166,7 @@ void Replay<I>::shut_down(bool cancel_ops, Context *on_finish) {
 
     // safely commit any remaining AIO modify operations
     if ((m_in_flight_aio_flush + m_in_flight_aio_modify) != 0) {
-      flush_comp = create_aio_flush_completion(nullptr, nullptr);;
+      flush_comp = create_aio_flush_completion(nullptr);
     }
 
     for (auto &op_event_pair : m_op_events) {
@@ -214,7 +214,7 @@ void Replay<I>::flush(Context *on_finish) {
   {
     Mutex::Locker locker(m_lock);
     aio_comp = create_aio_flush_completion(
-      nullptr, util::create_async_context_callback(m_image_ctx, on_finish));
+      util::create_async_context_callback(m_image_ctx, on_finish));
   }
 
   RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
@@ -273,7 +273,7 @@ void Replay<I>::handle_event(const journal::AioDiscardEvent &event,
                                   event.length);
   if (flush_required) {
     m_lock.Lock();
-    AioCompletion *flush_comp = create_aio_flush_completion(nullptr, nullptr);
+    AioCompletion *flush_comp = create_aio_flush_completion(nullptr);
     m_lock.Unlock();
 
     AioImageRequest<I>::aio_flush(&m_image_ctx, flush_comp);
@@ -294,7 +294,7 @@ void Replay<I>::handle_event(const journal::AioWriteEvent &event,
                                 event.length, data.c_str(), 0);
   if (flush_required) {
     m_lock.Lock();
-    AioCompletion *flush_comp = create_aio_flush_completion(nullptr, nullptr);
+    AioCompletion *flush_comp = create_aio_flush_completion(nullptr);
     m_lock.Unlock();
 
     AioImageRequest<I>::aio_flush(&m_image_ctx, flush_comp);
@@ -310,9 +310,11 @@ void Replay<I>::handle_event(const journal::AioFlushEvent &event,
   AioCompletion *aio_comp;
   {
     Mutex::Locker locker(m_lock);
-    aio_comp = create_aio_flush_completion(on_ready, on_safe);
+    aio_comp = create_aio_flush_completion(on_safe);
   }
   AioImageRequest<I>::aio_flush(&m_image_ctx, aio_comp);
+
+  on_ready->complete(0);
 }
 
 template <typename I>
@@ -808,8 +810,7 @@ AioCompletion *Replay<I>::create_aio_modify_completion(Context *on_ready,
 }
 
 template <typename I>
-AioCompletion *Replay<I>::create_aio_flush_completion(Context *on_ready,
-                                                      Context *on_safe) {
+AioCompletion *Replay<I>::create_aio_flush_completion(Context *on_safe) {
   assert(m_lock.is_locked());
 
   ++m_in_flight_aio_flush;
@@ -819,10 +820,6 @@ AioCompletion *Replay<I>::create_aio_flush_completion(Context *on_ready,
       new C_AioFlushComplete(this, on_safe,
                              std::move(m_aio_modify_unsafe_contexts)));
   m_aio_modify_unsafe_contexts.clear();
-
-  if (on_ready != nullptr) {
-    on_ready->complete(0);
-  }
   return aio_comp;
 }
 
index a9d73c06dade17cc80e173827868cd149f93eac8..aeca5ba26db681f453eda6385c5c18bad34431ec 100644 (file)
@@ -168,8 +168,7 @@ private:
   AioCompletion *create_aio_modify_completion(Context *on_ready,
                                               Context *on_safe,
                                               bool *flush_required);
-  AioCompletion *create_aio_flush_completion(Context *on_ready,
-                                             Context *on_safe);
+  AioCompletion *create_aio_flush_completion(Context *on_safe);
   void handle_aio_completion(AioCompletion *aio_comp);
 
 };