librbd: ensure that thread pool lock is held when processing throttled IOs 37895/head
author     Jason Dillaman <dillaman@redhat.com>
           Fri, 11 Sep 2020 19:20:45 +0000 (15:20 -0400)
committer  Nathan Cutler <ncutler@suse.com>
           Thu, 3 Dec 2020 19:03:30 +0000 (20:03 +0100)
Previously there was a potential race where throttled IOs could complete
before the main worker thread had finished processing the blocked IO.
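
A minimal sketch of the locking pattern the fix applies (not the actual librbd
code: Item and MockWQ are hypothetical stand-ins for ImageDispatchSpec and
ImageRequestWQ; only the lock/requeue/signal structure mirrors the diff below):

  #include <cstdint>
  #include <deque>
  #include <mutex>

  struct Item {
    uint64_t throttled_flags = 0;
    void set_throttled(uint64_t flag) { throttled_flags |= flag; }
    bool were_all_throttled() const { return throttled_flags == 0x3; }
  };

  struct MockWQ {
    std::mutex pool_lock;        // stand-in for the thread pool's lock
    std::deque<Item*> queue;
    uint64_t io_throttled = 1;

    std::mutex &get_pool_lock() { return pool_lock; }

    // requeue_back() and signal() take the guard by reference so callers
    // must already hold the pool lock, mirroring the signature change below.
    void requeue_back(const std::lock_guard<std::mutex> &, Item *item) {
      queue.push_back(item);
    }
    void signal(const std::lock_guard<std::mutex> &) {
      // wake a worker thread (elided)
    }

    void handle_throttle_ready(Item *item, uint64_t flag) {
      // Acquire the pool lock *before* touching throttle state, so this
      // callback cannot race with the worker still processing the blocked IO.
      std::lock_guard pool_locker{get_pool_lock()};
      item->set_throttled(flag);
      if (item->were_all_throttled()) {
        requeue_back(pool_locker, item);
        --io_throttled;
        signal(pool_locker);
      }
    }
  };

Passing the lock_guard by reference is a cheap way to make "caller holds the
lock" part of the signature instead of an unchecked convention.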

Fixes: https://tracker.ceph.com/issues/47371
Signed-off-by: Jason Dillaman <dillaman@redhat.com>
(cherry picked from commit 2d86e0935aa6f0c392df428676d9ab0a338fccae)

Conflicts:
    src/test/librbd/io/test_mock_ImageRequestWQ.cc
- in Octopus, commit 792d6c53fedc695199cc18916347c1b545fe42c2 did a global
  replacement of Mutex with ceph::mutex; to fix this for Nautilus we only need
  to do that in test_mock_ImageRequestWQ.cc, since its get_pool_lock() method
  returns a Mutex instead of a ceph::mutex
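
For context, a rough before/after sketch of that conversion (ExampleWQ is a
made-up class; only the Mutex::Locker, ceph::make_mutex and std::lock_guard
idioms match the hunks below):

  // Before (Nautilus-era ceph Mutex):
  //   Mutex m_lock;
  //   ExampleWQ(const std::string &name) : m_lock(name) {}
  //   void *invoke_dequeue() {
  //     Mutex::Locker locker(m_lock);   // legacy RAII locker
  //     return _void_dequeue();
  //   }

  // After (ceph::mutex, as used by the backported test):
  #include <mutex>
  #include <string>
  #include "common/ceph_mutex.h"

  struct ExampleWQ {
    ceph::mutex m_lock;

    explicit ExampleWQ(const std::string &name)
      : m_lock(ceph::make_mutex(name)) {}  // name is only used by debug/lockdep builds

    void *invoke_dequeue() {
      std::lock_guard locker{m_lock};      // standard guard works with ceph::mutex
      return _void_dequeue();
    }

    void *_void_dequeue() { return nullptr; }
  };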

src/librbd/io/ImageRequestWQ.cc
src/test/librbd/io/test_mock_ImageRequestWQ.cc

diff --git a/src/librbd/io/ImageRequestWQ.cc b/src/librbd/io/ImageRequestWQ.cc
index b9c508f649d8ad6078de6d452003b168a808c181..7bdaf2f25a4252608bd0413fc2dd11d26bf19d97 100644
--- a/src/librbd/io/ImageRequestWQ.cc
+++ b/src/librbd/io/ImageRequestWQ.cc
@@ -730,21 +730,23 @@ void ImageRequestWQ<I>::apply_qos_limit(const uint64_t flag,
 }
 
 template <typename I>
-void ImageRequestWQ<I>::handle_throttle_ready(int r, ImageDispatchSpec<I> *item, uint64_t flag) {
+void ImageRequestWQ<I>::handle_throttle_ready(int r, ImageDispatchSpec<I> *item,
+                                              uint64_t flag) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 15) << "r=" << r << ", " << "req=" << item << dendl;
 
+  std::lock_guard pool_locker{this->get_pool_lock()};
   ceph_assert(m_io_throttled.load() > 0);
   item->set_throttled(flag);
   if (item->were_all_throttled()) {
-    this->requeue_back(item);
+    this->requeue_back(pool_locker, item);
     --m_io_throttled;
-    this->signal();
+    this->signal(pool_locker);
   }
 }
 
 template <typename I>
-bool ImageRequestWQ<I>::needs_throttle(ImageDispatchSpec<I> *item) { 
+bool ImageRequestWQ<I>::needs_throttle(ImageDispatchSpec<I> *item) {
   uint64_t tokens = 0;
   uint64_t flag = 0;
   bool blocked = false;
diff --git a/src/test/librbd/io/test_mock_ImageRequestWQ.cc b/src/test/librbd/io/test_mock_ImageRequestWQ.cc
index 50daa83c777bc506e9a58127d39a69939d0f9863..7b91d68fbaddfb648dd2de49264c444bae924159 100644
--- a/src/test/librbd/io/test_mock_ImageRequestWQ.cc
+++ b/src/test/librbd/io/test_mock_ImageRequestWQ.cc
@@ -88,10 +88,10 @@ struct ThreadPool::PointerWQ<librbd::io::ImageDispatchSpec<librbd::MockTestImage
   typedef librbd::io::ImageDispatchSpec<librbd::MockTestImageCtx> ImageDispatchSpec;
   static PointerWQ* s_instance;
 
-  Mutex m_lock;
+  ceph::mutex m_lock;
 
   PointerWQ(const std::string &name, time_t, int, ThreadPool *)
-    : m_lock(name) {
+    : m_lock(ceph::make_mutex(name)) {
     s_instance = this;
   }
   virtual ~PointerWQ() {
@@ -101,11 +101,13 @@ struct ThreadPool::PointerWQ<librbd::io::ImageDispatchSpec<librbd::MockTestImage
   MOCK_METHOD0(empty, bool());
   MOCK_METHOD0(mock_empty, bool());
   MOCK_METHOD0(signal, void());
+  MOCK_METHOD1(signal, void(const std::lock_guard<ceph::mutex>&));
   MOCK_METHOD0(process_finish, void());
 
   MOCK_METHOD0(front, ImageDispatchSpec*());
   MOCK_METHOD1(requeue_front, void(ImageDispatchSpec*));
-  MOCK_METHOD1(requeue_back, void(ImageDispatchSpec*));
+  MOCK_METHOD2(requeue_back, void(const std::lock_guard<ceph::mutex>&,
+                                  ImageDispatchSpec*));
 
   MOCK_METHOD0(dequeue, void*());
   MOCK_METHOD1(queue, void(ImageDispatchSpec*));
@@ -113,12 +115,12 @@ struct ThreadPool::PointerWQ<librbd::io::ImageDispatchSpec<librbd::MockTestImage
   void register_work_queue() {
     // no-op
   }
-  Mutex &get_pool_lock() {
+  ceph::mutex &get_pool_lock() {
     return m_lock;
   }
 
   void* invoke_dequeue() {
-    Mutex::Locker locker(m_lock);
+    std::lock_guard locker{m_lock};
     return _void_dequeue();
   }
   void invoke_process(ImageDispatchSpec *image_request) {
@@ -167,12 +169,16 @@ struct TestMockIoImageRequestWQ : public TestMockFixture {
     EXPECT_CALL(image_request_wq, signal());
   }
 
+  void expect_signal_locked(MockImageRequestWQ &image_request_wq) {
+    EXPECT_CALL(image_request_wq, signal(_));
+  }
+
   void expect_queue(MockImageRequestWQ &image_request_wq) {
     EXPECT_CALL(image_request_wq, queue(_));
   }
 
   void expect_requeue_back(MockImageRequestWQ &image_request_wq) {
-    EXPECT_CALL(image_request_wq, requeue_back(_));
+    EXPECT_CALL(image_request_wq, requeue_back(_, _));
   }
 
   void expect_front(MockImageRequestWQ &image_request_wq,
@@ -427,7 +433,7 @@ TEST_F(TestMockIoImageRequestWQ, BPSQosNoBurst) {
   expect_dequeue(mock_image_request_wq, &mock_queued_image_request);
   expect_all_throttled(mock_queued_image_request, true);
   expect_requeue_back(mock_image_request_wq);
-  expect_signal(mock_image_request_wq);
+  expect_signal_locked(mock_image_request_wq);
   ASSERT_TRUE(mock_image_request_wq.invoke_dequeue() == nullptr);
 }
 
@@ -451,7 +457,7 @@ TEST_F(TestMockIoImageRequestWQ, BPSQosWithBurst) {
   expect_dequeue(mock_image_request_wq, &mock_queued_image_request);
   expect_all_throttled(mock_queued_image_request, true);
   expect_requeue_back(mock_image_request_wq);
-  expect_signal(mock_image_request_wq);
+  expect_signal_locked(mock_image_request_wq);
   ASSERT_TRUE(mock_image_request_wq.invoke_dequeue() == nullptr);
 }