librbd: switch all queued contexts in IO path to use asio post
author    Jason Dillaman <dillaman@redhat.com>
          Wed, 17 Jun 2020 19:20:35 +0000 (15:20 -0400)
committer Jason Dillaman <dillaman@redhat.com>
          Thu, 16 Jul 2020 19:59:31 +0000 (15:59 -0400)
The post operation is similar to the asio::ContextWQ::queue wrapper,
but execution is not limited to a single-threaded strand.

Signed-off-by: Jason Dillaman <dillaman@redhat.com>
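
For readers unfamiliar with the Boost.Asio primitives involved, the
following self-contained C++ sketch (illustrative only, not part of this
commit) shows the semantics the message describes: post() always defers
its handler to a thread running the io_context, while dispatch() may run
it inline when the caller is already on such a thread; neither serializes
handlers the way a strand-backed ContextWQ does.

#include <boost/asio/dispatch.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <iostream>

int main() {
  boost::asio::io_context io;

  boost::asio::post(io, []() {
    // post() never invokes the handler inline; it is queued and run
    // later by a thread executing io_context::run().
    std::cout << "posted handler" << std::endl;
  });

  boost::asio::dispatch(io, []() {
    // dispatch() may invoke the handler immediately if the caller is
    // already running inside the io_context; here (called from main)
    // it is queued just like post().
    std::cout << "dispatched handler" << std::endl;
  });

  io.run();
  return 0;
}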
src/librbd/AsioEngine.cc
src/librbd/AsioEngine.h
src/librbd/io/AioCompletion.cc
src/librbd/io/AsyncOperation.cc
src/librbd/io/CopyupRequest.cc
src/librbd/io/CopyupRequest.h
src/librbd/io/ObjectDispatch.cc
src/librbd/io/ObjectRequest.cc
src/librbd/io/QosImageDispatch.cc
src/librbd/io/QueueImageDispatch.cc
src/librbd/io/SimpleSchedulerObjectDispatch.cc

diff --git a/src/librbd/AsioEngine.cc b/src/librbd/AsioEngine.cc
index 3e62988283be78e28ccb86bc30394679b6901f95..535ad57b5ac0f8c2258c5fe7e160c5414563cd88 100644
@@ -2,6 +2,7 @@
 // vim: ts=8 sw=2 smarttab
 
 #include "librbd/AsioEngine.h"
+#include "include/Context.h"
 #include "include/stringify.h"
 #include "include/neorados/RADOS.hpp"
 #include "include/rados/librados.hpp"
@@ -41,4 +42,12 @@ AsioEngine::~AsioEngine() {
   ldout(m_cct, 20) << dendl;
 }
 
+void AsioEngine::dispatch(Context* ctx, int r) {
+  dispatch([ctx, r]() { ctx->complete(r); });
+}
+
+void AsioEngine::post(Context* ctx, int r) {
+  post([ctx, r]() { ctx->complete(r); });
+}
+
 } // namespace librbd
diff --git a/src/librbd/AsioEngine.h b/src/librbd/AsioEngine.h
index d338431372498fc6a97ea37610ec426ef771518f..00cb7f7f8a102f339b1a50d978e6bd86f1df4684 100644
@@ -7,9 +7,12 @@
 #include "include/common_fwd.h"
 #include "include/rados/librados_fwd.hpp"
 #include <memory>
+#include <boost/asio/dispatch.hpp>
 #include <boost/asio/io_context.hpp>
 #include <boost/asio/io_context_strand.hpp>
+#include <boost/asio/post.hpp>
 
+struct Context;
 namespace neorados { struct RADOS; }
 
 namespace librbd {
@@ -49,6 +52,18 @@ public:
     return m_context_wq.get();
   }
 
+  template <typename T>
+  void dispatch(T&& t) {
+    boost::asio::dispatch(m_io_context, std::forward<T>(t));
+  }
+  void dispatch(Context* ctx, int r);
+
+  template <typename T>
+  void post(T&& t) {
+    boost::asio::post(m_io_context, std::forward<T>(t));
+  }
+  void post(Context* ctx, int r);
+
 private:
   std::shared_ptr<neorados::RADOS> m_rados_api;
   CephContext* m_cct;
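
Together with the AsioEngine.cc hunk above, these overloads let callers
hand the engine either a functor or a legacy Context*. The following
self-contained sketch (simplified stand-ins, not the real librbd types)
mirrors the wrapper pattern:

#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <iostream>
#include <utility>

struct Context {                 // stand-in for ceph's include/Context.h
  virtual ~Context() = default;
  virtual void finish(int r) = 0;
  void complete(int r) { finish(r); delete this; }
};

class MiniAsioEngine {           // stand-in for librbd::AsioEngine
public:
  explicit MiniAsioEngine(boost::asio::io_context& io) : m_io_context(io) {}

  template <typename T>
  void post(T&& t) {
    boost::asio::post(m_io_context, std::forward<T>(t));
  }

  // Context* overload mirrors the commit: wrap complete(r) in a lambda.
  void post(Context* ctx, int r) {
    post([ctx, r]() { ctx->complete(r); });
  }

private:
  boost::asio::io_context& m_io_context;
};

struct PrintCtx : Context {
  void finish(int r) override { std::cout << "r=" << r << std::endl; }
};

int main() {
  boost::asio::io_context io;
  MiniAsioEngine engine(io);
  engine.post(new PrintCtx(), 0);  // completes on an io_context thread
  io.run();
  return 0;
}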
diff --git a/src/librbd/io/AioCompletion.cc b/src/librbd/io/AioCompletion.cc
index 24e7afa4c75b7fb0f6e2b5c3a3d5e6525a420274..a50677215818be4554f8ead5e0faf7e2321e9df8 100644
@@ -157,8 +157,8 @@ void AioCompletion::queue_complete() {
   add_request();
 
   // ensure completion fires in clean lock context
-  boost::asio::post(ictx->asio_engine, boost::asio::bind_executor(
-    ictx->asio_engine.get_api_strand(), [this]() {
+  boost::asio::post(*ictx->asio_engine, boost::asio::bind_executor(
+    ictx->asio_engine->get_api_strand(), [this]() {
       complete_request(0);
     }));
 }
@@ -261,8 +261,8 @@ void AioCompletion::complete_external_callback() {
 
   // ensure librbd external users never experience concurrent callbacks
   // from multiple librbd-internal threads.
-  boost::asio::dispatch(ictx->asio_engine, boost::asio::bind_executor(
-    ictx->asio_engine.get_api_strand(), [this]() {
+  boost::asio::dispatch(*ictx->asio_engine, boost::asio::bind_executor(
+    ictx->asio_engine->get_api_strand(), [this]() {
       complete_cb(rbd_comp, complete_arg);
       complete_event_socket();
       put();
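
The bind_executor() usage above is what preserves librbd's guarantee
that external callbacks never run concurrently: handlers bound to the
API strand are serialized even when several threads run the io_context.
A self-contained sketch (toy example, not librbd code) of that behavior:

#include <boost/asio/bind_executor.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/io_context_strand.hpp>
#include <boost/asio/post.hpp>
#include <iostream>
#include <thread>
#include <vector>

int main() {
  boost::asio::io_context io;
  boost::asio::io_context::strand strand(io);

  int counter = 0;  // no mutex needed: the strand serializes the handlers
  for (int i = 0; i < 100; ++i) {
    boost::asio::post(io, boost::asio::bind_executor(strand, [&counter]() {
      ++counter;    // never runs concurrently with another increment
    }));
  }

  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i) {
    workers.emplace_back([&io]() { io.run(); });
  }
  for (auto& worker : workers) {
    worker.join();
  }
  std::cout << "counter=" << counter << std::endl;  // always 100
  return 0;
}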
diff --git a/src/librbd/io/AsyncOperation.cc b/src/librbd/io/AsyncOperation.cc
index 4c4d508e7d25357745030fc2e70e6aba7968cc1f..18db2410e4a5a3a9dc96c06c18c5ab9120751c77 100644
@@ -2,10 +2,10 @@
 // vim: ts=8 sw=2 smarttab
 
 #include "librbd/io/AsyncOperation.h"
-#include "librbd/ImageCtx.h"
-#include "librbd/asio/ContextWQ.h"
-#include "common/dout.h"
 #include "include/ceph_assert.h"
+#include "common/dout.h"
+#include "librbd/AsioEngine.h"
+#include "librbd/ImageCtx.h"
 
 #define dout_subsys ceph_subsys_rbd
 #undef dout_prefix
@@ -70,7 +70,7 @@ void AsyncOperation::finish_op() {
   if (!m_flush_contexts.empty()) {
     C_CompleteFlushes *ctx = new C_CompleteFlushes(m_image_ctx,
                                                    std::move(m_flush_contexts));
-    m_image_ctx->op_work_queue->queue(ctx);
+    m_image_ctx->asio_engine->post(ctx, 0);
   }
 }
 
@@ -87,7 +87,7 @@ void AsyncOperation::flush(Context* on_finish) {
     }
   }
 
-  m_image_ctx->op_work_queue->queue(on_finish);
+  m_image_ctx->asio_engine->post(on_finish, 0);
 }
 
 } // namespace io
diff --git a/src/librbd/io/CopyupRequest.cc b/src/librbd/io/CopyupRequest.cc
index d41573d693112c64dce48c081f1e9488de1363a4..27d396a0eaf106b9eb576fa9defddf99a94f945e 100644
@@ -6,6 +6,7 @@
 #include "common/ceph_mutex.h"
 #include "common/dout.h"
 #include "common/errno.h"
+#include "librbd/AsioEngine.h"
 #include "librbd/AsyncObjectThrottle.h"
 #include "librbd/ExclusiveLock.h"
 #include "librbd/ImageCtx.h"
@@ -154,10 +155,8 @@ void CopyupRequest<I>::read_from_parent() {
   if (m_image_ctx->parent == nullptr) {
     ldout(cct, 5) << "parent detached" << dendl;
 
-    m_image_ctx->op_work_queue->queue(
-      util::create_context_callback<
-        CopyupRequest<I>, &CopyupRequest<I>::handle_read_from_parent>(this),
-      -ENOENT);
+    m_image_ctx->asio_engine->post(
+      [this]() { handle_read_from_parent(-ENOENT); });
     return;
   } else if (is_deep_copy()) {
     deep_copy();
@@ -470,21 +469,28 @@ template <typename I>
 void CopyupRequest<I>::handle_copyup(int r) {
   auto cct = m_image_ctx->cct;
   unsigned pending_copyups;
+  int copyup_ret_val = r;
   {
     std::lock_guard locker{m_lock};
     ceph_assert(m_pending_copyups > 0);
     pending_copyups = --m_pending_copyups;
+    if (m_copyup_ret_val < 0) {
+      copyup_ret_val = m_copyup_ret_val;
+    } else if (r < 0) {
+      m_copyup_ret_val = r;
+    }
   }
 
   ldout(cct, 20) << "r=" << r << ", "
                  << "pending=" << pending_copyups << dendl;
 
-  if (r < 0 && r != -ENOENT) {
-    lderr(cct) << "failed to copyup object: " << cpp_strerror(r) << dendl;
-    complete_requests(false, r);
-  }
-
   if (pending_copyups == 0) {
+    if (copyup_ret_val < 0 && copyup_ret_val != -ENOENT) {
+      lderr(cct) << "failed to copyup object: " << cpp_strerror(copyup_ret_val)
+                 << dendl;
+      complete_requests(false, copyup_ret_val);
+    }
+
     finish(0);
   }
 }
@@ -603,6 +609,7 @@ void CopyupRequest<I>::compute_deep_copy_snap_ids() {
       deep_copied.insert(it.second.front());
     }
   }
+  ldout(m_image_ctx->cct, 15) << "deep_copied=" << deep_copied << dendl;
 
   std::copy_if(m_image_ctx->snaps.rbegin(), m_image_ctx->snaps.rend(),
                std::back_inserter(m_snap_ids),
diff --git a/src/librbd/io/CopyupRequest.h b/src/librbd/io/CopyupRequest.h
index 01d679f6213b7d8361b8a903cae2dcfee61a3015..1dc9d1ca4931e95d94199c21d56feaf55c253d2b 100644
@@ -99,6 +99,7 @@ private:
   ceph::mutex m_lock = ceph::make_mutex("CopyupRequest", false);
   WriteRequests m_pending_requests;
   unsigned m_pending_copyups = 0;
+  int m_copyup_ret_val = 0;
 
   WriteRequests m_restart_requests;
   bool m_append_request_permitted = true;
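
The new m_copyup_ret_val bookkeeping defers error reporting until the
last in-flight copyup completes, with the first observed error winning.
A simplified stand-alone sketch (hypothetical CopyupTracker class, not
the real request) of the same aggregation:

#include <cassert>
#include <iostream>
#include <mutex>

class CopyupTracker {
public:
  explicit CopyupTracker(unsigned pending) : m_pending_copyups(pending) {}

  // Returns true when the last copyup has completed; *ret_val receives
  // the aggregated result (first error wins, 0 if none failed).
  bool handle_copyup(int r, int* ret_val) {
    std::lock_guard<std::mutex> locker(m_lock);
    assert(m_pending_copyups > 0);
    --m_pending_copyups;
    if (m_copyup_ret_val < 0) {
      r = m_copyup_ret_val;   // an earlier copyup already failed
    } else if (r < 0) {
      m_copyup_ret_val = r;   // remember the first failure
    }
    *ret_val = r;
    return (m_pending_copyups == 0);
  }

private:
  std::mutex m_lock;
  unsigned m_pending_copyups;
  int m_copyup_ret_val = 0;
};

int main() {
  CopyupTracker tracker(2);
  int ret_val = 0;
  tracker.handle_copyup(-5 /* -EIO */, &ret_val);  // first copyup fails
  bool done = tracker.handle_copyup(0, &ret_val);  // last copyup succeeds
  std::cout << "done=" << done << " ret_val=" << ret_val << std::endl;
  return 0;  // prints done=1 ret_val=-5
}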
diff --git a/src/librbd/io/ObjectDispatch.cc b/src/librbd/io/ObjectDispatch.cc
index ecf9ae23d078798a18f31108579719b502522c50..3e202f1b0795c74cdc162a03457ebb8f9787b69f 100644
@@ -3,9 +3,9 @@
 
 #include "librbd/io/ObjectDispatch.h"
 #include "common/dout.h"
+#include "librbd/AsioEngine.h"
 #include "librbd/ImageCtx.h"
 #include "librbd/Utils.h"
-#include "librbd/asio/ContextWQ.h"
 #include "librbd/io/ObjectRequest.h"
 
 #define dout_subsys ceph_subsys_rbd
@@ -28,7 +28,7 @@ void ObjectDispatch<I>::shut_down(Context* on_finish) {
   auto cct = m_image_ctx->cct;
   ldout(cct, 5) << dendl;
 
-  m_image_ctx->op_work_queue->queue(on_finish, 0);
+  m_image_ctx->asio_engine->post(on_finish, 0);
 }
 
 template <typename I>
diff --git a/src/librbd/io/ObjectRequest.cc b/src/librbd/io/ObjectRequest.cc
index 51efbae21efb77d82ea2fd5142d48e65d91f19a6..407bc1d33304518f744ef1ae6981d927ed639e94 100644
 #include "include/err.h"
 #include "osd/osd_types.h"
 
+#include "librbd/AsioEngine.h"
 #include "librbd/ExclusiveLock.h"
 #include "librbd/ImageCtx.h"
 #include "librbd/ObjectMap.h"
 #include "librbd/Utils.h"
-#include "librbd/asio/ContextWQ.h"
 #include "librbd/io/AioCompletion.h"
 #include "librbd/io/CopyupRequest.h"
 #include "librbd/io/ImageRequest.h"
@@ -165,8 +165,7 @@ bool ObjectRequest<I>::compute_parent_extents(Extents *parent_extents,
 template <typename I>
 void ObjectRequest<I>::async_finish(int r) {
   ldout(m_ictx->cct, 20) << "r=" << r << dendl;
-  m_ictx->op_work_queue->queue(util::create_context_callback<
-    ObjectRequest<I>, &ObjectRequest<I>::finish>(this), r);
+  m_ictx->asio_engine->post([this, r]() { finish(r); });
 }
 
 template <typename I>
@@ -203,9 +202,7 @@ void ObjectReadRequest<I>::read_object() {
     std::shared_lock image_locker{image_ctx->image_lock};
     if (image_ctx->object_map != nullptr &&
         !image_ctx->object_map->object_may_exist(this->m_object_no)) {
-      image_ctx->op_work_queue->queue(new LambdaContext([this](int r) {
-          read_parent();
-        }), 0);
+      image_ctx->asio_engine->post([this]() { read_parent(); });
       return;
     }
   }
diff --git a/src/librbd/io/QosImageDispatch.cc b/src/librbd/io/QosImageDispatch.cc
index f19471f8d73f35f36efa287b9060a11690fa0ea9..8badb58b1a4ac907d5fae39f68a98d21b875e356 100644
@@ -3,8 +3,8 @@
 
 #include "librbd/io/QosImageDispatch.h"
 #include "common/dout.h"
+#include "librbd/AsioEngine.h"
 #include "librbd/ImageCtx.h"
-#include "librbd/asio/ContextWQ.h"
 #include "librbd/io/FlushTracker.h"
 #include <map>
 
@@ -282,7 +282,7 @@ void QosImageDispatch<I>::handle_throttle_ready(Tag&& tag, uint64_t flag) {
 
   if (set_throttle_flag(tag.image_dispatch_flags, flag)) {
     // timer_lock is held -- so dispatch from outside the timer thread
-    m_image_ctx->op_work_queue->queue(tag.on_dispatched, 0);
+    m_image_ctx->asio_engine->post(tag.on_dispatched, 0);
   }
 }
 
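
As the in-line comment notes, handle_throttle_ready() runs with
timer_lock held, so completions must be posted rather than invoked
inline. A minimal sketch (toy types; ceph's actual SafeTimer differs)
of that hand-off:

#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <functional>
#include <iostream>
#include <mutex>

std::mutex timer_lock;  // stand-in for the timer thread's lock

void on_timer_fired(boost::asio::io_context& io,
                    std::function<void(int)> on_dispatched) {
  std::lock_guard<std::mutex> locker(timer_lock);
  // Invoking on_dispatched() here would run arbitrary completion code
  // while timer_lock is held; posting defers it to a worker thread.
  boost::asio::post(io, [on_dispatched]() { on_dispatched(0); });
}

int main() {
  boost::asio::io_context io;
  on_timer_fired(io, [](int r) {
    std::cout << "dispatched r=" << r << std::endl;
  });
  io.run();
  return 0;
}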
diff --git a/src/librbd/io/QueueImageDispatch.cc b/src/librbd/io/QueueImageDispatch.cc
index d901d2ebbd0bef13ec22225f0fa812b21404812f..7cb5dfc5148497f2d740c8ea293a3cbf6a1d77bf 100644
@@ -4,9 +4,9 @@
 #include "librbd/io/QueueImageDispatch.h"
 #include "common/dout.h"
 #include "common/Cond.h"
+#include "librbd/AsioEngine.h"
 #include "librbd/ImageCtx.h"
 #include "librbd/Utils.h"
-#include "librbd/asio/ContextWQ.h"
 #include "librbd/io/AioCompletion.h"
 #include "librbd/io/ImageDispatchSpec.h"
 
@@ -115,7 +115,7 @@ bool QueueImageDispatch<I>::enqueue(
   }
 
   *dispatch_result = DISPATCH_RESULT_CONTINUE;
-  m_image_ctx->op_work_queue->queue(on_dispatched, 0);
+  m_image_ctx->asio_engine->post(on_dispatched, 0);
   return true;
 }
 
diff --git a/src/librbd/io/SimpleSchedulerObjectDispatch.cc b/src/librbd/io/SimpleSchedulerObjectDispatch.cc
index ec9c3b434084f8ac71e7a6f2ca5159e5232cd5e0..f6781b6646e65b29d3173e3b81a1eb4552d2437f 100644
@@ -4,9 +4,9 @@
 #include "librbd/io/SimpleSchedulerObjectDispatch.h"
 #include "common/Timer.h"
 #include "common/errno.h"
+#include "librbd/AsioEngine.h"
 #include "librbd/ImageCtx.h"
 #include "librbd/Utils.h"
-#include "librbd/asio/ContextWQ.h"
 #include "librbd/io/ObjectDispatchSpec.h"
 #include "librbd/io/ObjectDispatcher.h"
 #include "librbd/io/Utils.h"
@@ -505,12 +505,11 @@ void SimpleSchedulerObjectDispatch<I>::schedule_dispatch_delayed_requests() {
       ldout(cct, 20) << "running timer task " << m_timer_task << dendl;
 
       m_timer_task = nullptr;
-      m_image_ctx->op_work_queue->queue(
-          new LambdaContext(
-            [this, object_no](int r) {
-             std::lock_guard locker{m_lock};
-              dispatch_delayed_requests(object_no);
-            }), 0);
+      m_image_ctx->asio_engine->post(
+        [this, object_no]() {
+          std::lock_guard locker{m_lock};
+          dispatch_delayed_requests(object_no);
+        });
     });
 
   ldout(cct, 20) << "scheduling task " << m_timer_task << " at "