git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
librbd: clean up object map update interface, revisited 15460/head
author: Nathan Cutler <ncutler@suse.com>
Sat, 26 Aug 2017 08:06:25 +0000 (10:06 +0200)
committer: Nathan Cutler <ncutler@suse.com>
Sat, 26 Aug 2017 09:04:14 +0000 (11:04 +0200)
In master, the "batch update" change [1] was merged before the "order
concurrent updates" [2], while in jewel the latter is already
backported [3]. A backport of [1] to jewel was attempted, and was
necessarily applied on top of [3] - i.e. in the reverse order compared
to how the commits went into master. This reverse ordering caused the
automated cherry-pick to miss some parts of [1] which this commit is
adding manually.

[1] https://github.com/ceph/ceph/pull/11510
[2] https://github.com/ceph/ceph/pull/12420
[3] https://github.com/ceph/ceph/pull/12909

Signed-off-by: Mykola Golub <mgolub@mirantis.com>
Signed-off-by: Nathan Cutler <ncutler@suse.com>
src/librbd/operation/TrimRequest.cc

index 6e369b10b318b409562f328d814bb27bbf886b61..5e3500779df5f024b56227cd72bbf74284ca257f 100644 (file)
@@ -264,12 +264,9 @@ void TrimRequest<I>::send_pre_copyup() {
   m_copyup_start = m_delete_start;
   m_delete_start = m_copyup_end;
 
-  bool copyup_objects = false;
   {
     RWLock::RLocker snap_locker(image_ctx.snap_lock);
-    if (image_ctx.object_map == nullptr) {
-      copyup_objects = true;
-    } else {
+    if (image_ctx.object_map != nullptr) {
       ldout(image_ctx.cct, 5) << this << " send_pre_copyup: "
                               << " copyup_start=" << m_copyup_start
                               << " copyup_end=" << m_copyup_end << dendl;
@@ -277,19 +274,16 @@ void TrimRequest<I>::send_pre_copyup() {
 
       assert(image_ctx.exclusive_lock->is_lock_owner());
 
-      Context *ctx = this->create_callback_context();
       RWLock::WLocker object_map_locker(image_ctx.object_map_lock);
-      if (!image_ctx.object_map->aio_update(m_copyup_start, m_copyup_end,
-                                            OBJECT_PENDING, OBJECT_EXISTS, ctx)) {
-        delete ctx;
-        copyup_objects = true;
+      if (image_ctx.object_map->template aio_update<AsyncRequest<I> >(
+            CEPH_NOSNAP, m_copyup_start, m_copyup_end, OBJECT_PENDING,
+            OBJECT_EXISTS, this)) {
+        return;
       }
     }
   }
 
-  if (copyup_objects) {
-    send_copyup_objects();
-  }
+  send_copyup_objects();
 }
 
 template <typename I>
@@ -330,12 +324,9 @@ void TrimRequest<I>::send_post_copyup() {
   I &image_ctx = this->m_image_ctx;
   assert(image_ctx.owner_lock.is_locked());
 
-  bool pre_remove_objects = false;
   {
     RWLock::RLocker snap_locker(image_ctx.snap_lock);
-    if (image_ctx.object_map == nullptr) {
-      pre_remove_objects = true;
-    } else {
+    if (image_ctx.object_map != nullptr) {
       ldout(image_ctx.cct, 5) << this << " send_post_copyup:"
                               << " copyup_start=" << m_copyup_start
                               << " copyup_end=" << m_copyup_end << dendl;
@@ -343,19 +334,16 @@ void TrimRequest<I>::send_post_copyup() {
 
       assert(image_ctx.exclusive_lock->is_lock_owner());
 
-      Context *ctx = this->create_callback_context();
       RWLock::WLocker object_map_locker(image_ctx.object_map_lock);
-      if (!image_ctx.object_map->aio_update(m_copyup_start, m_copyup_end,
-                                            OBJECT_NONEXISTENT, OBJECT_PENDING, ctx)) {
-        delete ctx;
-        pre_remove_objects = true;
+      if (image_ctx.object_map->template aio_update<AsyncRequest<I> >(
+            CEPH_NOSNAP, m_copyup_start, m_copyup_end, OBJECT_NONEXISTENT,
+            OBJECT_PENDING, this)) {
+        return;
       }
     }
   }
 
-  if (pre_remove_objects) {
-    send_pre_remove();
-  }
+  send_pre_remove();
 }
 
 template <typename I>