git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
librbd: simplify state machine handling of exclusive lock
author Jason Dillaman <dillaman@redhat.com>
Thu, 30 Apr 2015 20:04:28 +0000 (16:04 -0400)
committer Jason Dillaman <dillaman@redhat.com>
Tue, 28 Jul 2015 20:36:35 +0000 (16:36 -0400)
It is expected that all IO is flushed and all async ops are cancelled
prior to releasing the exclusive lock.  Therefore, replace handling of
lost exclusive locks in state machines with an assertion.

Signed-off-by: Jason Dillaman <dillaman@redhat.com>
(cherry picked from commit d6b733dbdd0aeb5d1e136dcbf30c58c80952651e)

src/librbd/AioRequest.cc
src/librbd/AioRequest.h
src/librbd/AsyncFlattenRequest.cc
src/librbd/AsyncResizeRequest.cc
src/librbd/AsyncTrimRequest.cc
src/librbd/AsyncTrimRequest.h

index b4b72da295b242f20ba9f27952c0b83880c7bca7..7f7641352bf455aea99dc19508e2e3234d46a17a 100644 (file)
@@ -377,80 +377,71 @@ namespace librbd {
   }
 
   void AbstractWrite::send() {
+    assert(m_ictx->owner_lock.is_locked());
     ldout(m_ictx->cct, 20) << "send " << this << " " << m_oid << " "
                           << m_object_off << "~" << m_object_len << dendl;
+    send_pre();
+  }
 
-    if (!send_pre()) {
+  void AbstractWrite::send_pre() {
+    assert(m_ictx->owner_lock.is_locked());
+    RWLock::RLocker snap_lock(m_ictx->snap_lock);
+    if (!m_ictx->object_map.enabled()) {
       send_write();
+      return;
     }
-  }
 
-  bool AbstractWrite::send_pre() {
-    bool lost_exclusive_lock = false;
-    {
-      RWLock::RLocker l(m_ictx->owner_lock);
-      if (!m_ictx->object_map.enabled()) {
-       return false;
-      }
+    // should have been flushed prior to releasing lock
+    assert(m_ictx->image_watcher->is_lock_owner());
 
-      if (!m_ictx->image_watcher->is_lock_owner()) {
-       ldout(m_ictx->cct, 1) << "lost exclusive lock during write" << dendl;
-       lost_exclusive_lock = true;
-      } else {
-       ldout(m_ictx->cct, 20) << "send_pre " << this << " " << m_oid << " "
-                              << m_object_off << "~" << m_object_len << dendl;
-
-        uint8_t new_state;
-        boost::optional<uint8_t> current_state;
-        pre_object_map_update(&new_state);
-
-        m_state = LIBRBD_AIO_WRITE_PRE;
-        FunctionContext *ctx = new FunctionContext(
-          boost::bind(&AioRequest::complete, this, _1));
-        RWLock::RLocker snap_locker(m_ictx->snap_lock);
-        RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
-        if (!m_ictx->object_map.aio_update(m_object_no, new_state,
-                                           current_state, ctx)) {
-         // no object map update required
-         delete ctx;
-         return false;
-       }
-      }
-    }
+    ldout(m_ictx->cct, 20) << "send_pre " << this << " " << m_oid << " "
+                          << m_object_off << "~" << m_object_len << dendl;
+    m_state = LIBRBD_AIO_WRITE_PRE;
 
-    if (lost_exclusive_lock) {
-      complete(-ERESTART);
+    uint8_t new_state;
+    boost::optional<uint8_t> current_state;
+    pre_object_map_update(&new_state);
+
+    RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
+    if (m_ictx->object_map[m_object_no] == new_state) {
+      send_write();
+      return;
     }
-    return true;
+
+    FunctionContext *ctx = new FunctionContext(
+      boost::bind(&AioRequest::complete, this, _1));
+    bool updated = m_ictx->object_map.aio_update(m_object_no, new_state,
+                                                 current_state, ctx);
+    assert(updated);
   }
 
   bool AbstractWrite::send_post() {
-    ldout(m_ictx->cct, 20) << "send_post " << this << " " << m_oid << " "
-                          << m_object_off << "~" << m_object_len << dendl;
-
-    RWLock::RLocker l(m_ictx->owner_lock);
+    RWLock::RLocker owner_locker(m_ictx->owner_lock);
+    RWLock::RLocker snap_locker(m_ictx->snap_lock);
     if (!m_ictx->object_map.enabled() || !post_object_map_update()) {
       return true;
     }
 
-    if (m_ictx->image_watcher->is_lock_supported() &&
-        !m_ictx->image_watcher->is_lock_owner()) {
-      // leave the object flagged as pending
-      ldout(m_ictx->cct, 1) << "lost exclusive lock during write" << dendl;
-      return true;
-    }
+    // should have been flushed prior to releasing lock
+    assert(m_ictx->image_watcher->is_lock_owner());
 
+    ldout(m_ictx->cct, 20) << "send_post " << this << " " << m_oid << " "
+                          << m_object_off << "~" << m_object_len << dendl;
     m_state = LIBRBD_AIO_WRITE_POST;
-    FunctionContext *ctx = new FunctionContext(
-      boost::bind(&AioRequest::complete, this, _1));
-    RWLock::RLocker snap_locker(m_ictx->snap_lock);
+
     RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
-    if (!m_ictx->object_map.aio_update(m_object_no, OBJECT_NONEXISTENT,
-                                       OBJECT_PENDING, ctx)) {
-      // no object map update required
-      delete ctx;
+    uint8_t current_state = m_ictx->object_map[m_object_no];
+    if (current_state != OBJECT_PENDING ||
+        current_state == OBJECT_NONEXISTENT) {
       return true;
     }
+
+    FunctionContext *ctx = new FunctionContext(
+      boost::bind(&AioRequest::complete, this, _1));
+    bool updated = m_ictx->object_map.aio_update(m_object_no,
+                                                 OBJECT_NONEXISTENT,
+                                                OBJECT_PENDING, ctx);
+    assert(updated);
     return false;
   }
 
index 9eb7a636c91bef2d60d2c020a72f6aff61515a30..4fff5ef8b91cbdcbc614cef9a444ccb5433ed9a5 100644 (file)
@@ -186,7 +186,7 @@ namespace librbd {
     }
 
   private:
-    bool send_pre();
+    void send_pre();
     bool send_post();
     void send_write();
     void send_copyup();
index ce0a008fb5a79824cea9348c9d2d903de83586f0..bd1875c8dd7af8bdc0e52c4d306a7b749844cfa3 100644 (file)
@@ -9,11 +9,11 @@
 #include "librbd/ObjectMap.h"
 #include "common/dout.h"
 #include "common/errno.h"
-#include <boost/lambda/bind.hpp> 
-#include <boost/lambda/construct.hpp>  
+#include <boost/lambda/bind.hpp>
+#include <boost/lambda/construct.hpp>
 
 #define dout_subsys ceph_subsys_rbd
-#undef dout_prefix 
+#undef dout_prefix
 #define dout_prefix *_dout << "librbd::AsyncFlattenRequest: "
 
 namespace librbd {
@@ -29,9 +29,9 @@ public:
   }
 
   virtual int send() {
+    assert(m_image_ctx.owner_lock.is_locked());
     CephContext *cct = m_image_ctx.cct;
 
-    RWLock::RLocker l(m_image_ctx.owner_lock);
     if (m_image_ctx.image_watcher->is_lock_supported() &&
         !m_image_ctx.image_watcher->is_lock_owner()) {
       ldout(cct, 1) << "lost exclusive lock during flatten" << dendl;
@@ -89,6 +89,7 @@ bool AsyncFlattenRequest::should_complete(int r) {
 }
 
 void AsyncFlattenRequest::send() {
+  assert(m_image_ctx.owner_lock.is_locked());
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << this << " send" << dendl;
 
@@ -104,85 +105,71 @@ void AsyncFlattenRequest::send() {
 }
 
 bool AsyncFlattenRequest::send_update_header() {
+  assert(m_image_ctx.owner_lock.is_locked());
   CephContext *cct = m_image_ctx.cct;
-  bool lost_exclusive_lock = false;
 
+  ldout(cct, 5) << this << " send_update_header" << dendl;
   m_state = STATE_UPDATE_HEADER;
-  {
-    RWLock::RLocker l(m_image_ctx.owner_lock);
-    if (m_image_ctx.image_watcher->is_lock_supported() &&
-       !m_image_ctx.image_watcher->is_lock_owner()) {
-      ldout(cct, 1) << "lost exclusive lock during header update" << dendl;
-      lost_exclusive_lock = true;
-    } else {
-      ldout(cct, 5) << this << " send_update_header" << dendl;
 
-      RWLock::RLocker l2(m_image_ctx.parent_lock);
-      // stop early if the parent went away - it just means
-      // another flatten finished first, so this one is useless.
-      if (!m_image_ctx.parent) {
-       ldout(cct, 5) << "image already flattened" << dendl; 
-        return true;
-      }
-      m_ignore_enoent = true;
-      m_parent_spec = m_image_ctx.parent_md.spec;
-
-      // remove parent from this (base) image
-      librados::ObjectWriteOperation op;
-      if (m_image_ctx.image_watcher->is_lock_supported()) {
-        m_image_ctx.image_watcher->assert_header_locked(&op);
-      }
-      cls_client::remove_parent(&op);
-
-      librados::AioCompletion *rados_completion = create_callback_completion();
-      int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
-                                        rados_completion, &op);
-      assert(r == 0);
-      rados_completion->release();
+  // should have been canceled prior to releasing lock
+  assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+         m_image_ctx.image_watcher->is_lock_owner());
+
+  {
+    RWLock::RLocker parent_locker(m_image_ctx.parent_lock);
+    // stop early if the parent went away - it just means
+    // another flatten finished first, so this one is useless.
+    if (!m_image_ctx.parent) {
+      ldout(cct, 5) << "image already flattened" << dendl;
+      return true;
     }
+    m_parent_spec = m_image_ctx.parent_md.spec;
   }
+  m_ignore_enoent = true;
 
-  if (lost_exclusive_lock) {
-    complete(-ERESTART);
+  // remove parent from this (base) image
+  librados::ObjectWriteOperation op;
+  if (m_image_ctx.image_watcher->is_lock_supported()) {
+    m_image_ctx.image_watcher->assert_header_locked(&op);
   }
+  cls_client::remove_parent(&op);
+
+  librados::AioCompletion *rados_completion = create_callback_completion();
+  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
+                                        rados_completion, &op);
+  assert(r == 0);
+  rados_completion->release();
   return false;
 }
 
 bool AsyncFlattenRequest::send_update_children() {
   CephContext *cct = m_image_ctx.cct;
-  bool lost_exclusive_lock = false;
 
-  m_state = STATE_UPDATE_CHILDREN;
-  {
-    RWLock::RLocker l(m_image_ctx.owner_lock);
-    if (m_image_ctx.image_watcher->is_lock_supported() &&
-        !m_image_ctx.image_watcher->is_lock_owner()) {
-      ldout(cct, 1) << "lost exclusive lock during children update" << dendl;
-      lost_exclusive_lock = true;
-    } else {
-      // if there are no snaps, remove from the children object as well
-      // (if snapshots remain, they have their own parent info, and the child
-      // will be removed when the last snap goes away)
-      RWLock::RLocker l2(m_image_ctx.snap_lock);
-      if (!m_image_ctx.snaps.empty()) {
-        return true;
-      }
-
-      ldout(cct, 2) << "removing child from children list..." << dendl;
-      librados::ObjectWriteOperation op;
-      cls_client::remove_child(&op, m_parent_spec, m_image_ctx.id);
-
-      librados::AioCompletion *rados_completion = create_callback_completion();
-      int r = m_image_ctx.md_ctx.aio_operate(RBD_CHILDREN, rados_completion,
-                                            &op);
-      assert(r == 0);
-      rados_completion->release();
-    }
-  }  
+  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
 
-  if (lost_exclusive_lock) {
-    complete(-ERESTART);
+  // should have been canceled prior to releasing lock
+  assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+         m_image_ctx.image_watcher->is_lock_owner());
+
+  // if there are no snaps, remove from the children object as well
+  // (if snapshots remain, they have their own parent info, and the child
+  // will be removed when the last snap goes away)
+  RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
+  if (!m_image_ctx.snaps.empty()) {
+    return true;
   }
+
+  ldout(cct, 2) << "removing child from children list..." << dendl;
+  m_state = STATE_UPDATE_CHILDREN;
+
+  librados::ObjectWriteOperation op;
+  cls_client::remove_child(&op, m_parent_spec, m_image_ctx.id);
+
+  librados::AioCompletion *rados_completion = create_callback_completion();
+  int r = m_image_ctx.md_ctx.aio_operate(RBD_CHILDREN, rados_completion,
+                                    &op);
+  assert(r == 0);
+  rados_completion->release();
   return false;
 }
 
index 621d59d967d328799bc9343c78d0e3bc4038b81c..a8143bbc5e41cfbc408ca12ec4a0494399a9be3b 100644 (file)
@@ -43,6 +43,7 @@ AsyncResizeRequest::~AsyncResizeRequest() {
   }
 
   if (next_req != NULL) {
+    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
     next_req->send();
   }
 }
@@ -72,7 +73,12 @@ bool AsyncResizeRequest::should_complete(int r) {
     lderr(cct) << "resize encountered an error: " << cpp_strerror(r) << dendl;
     return true;
   }
+  if (m_state == STATE_FINISHED) {
+    ldout(cct, 5) << "FINISHED" << dendl;
+    return true;
+  }
 
+  RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
   switch (m_state) {
   case STATE_FLUSH:
     ldout(cct, 5) << "FLUSH" << dendl;
@@ -109,10 +115,6 @@ bool AsyncResizeRequest::should_complete(int r) {
     increment_refresh_seq();
     return true;
 
-  case STATE_FINISHED:
-    ldout(cct, 5) << "FINISHED" << dendl;
-    return true;
-
   default:
     lderr(cct) << "invalid state: " << m_state << dendl;
     assert(false);
@@ -122,6 +124,7 @@ bool AsyncResizeRequest::should_complete(int r) {
 }
 
 void AsyncResizeRequest::send() {
+  assert(m_image_ctx.owner_lock.is_locked());
   {
     RWLock::RLocker l(m_image_ctx.snap_lock);
     assert(!m_image_ctx.async_resize_reqs.empty());
@@ -163,6 +166,7 @@ void AsyncResizeRequest::send_flush() {
 }
 
 void AsyncResizeRequest::send_invalidate_cache() {
+  assert(m_image_ctx.owner_lock.is_locked());
   ldout(m_image_ctx.cct, 5) << this << " send_invalidate_cache: "
                             << " original_size=" << m_original_size
                             << " new_size=" << m_new_size << dendl;
@@ -174,6 +178,7 @@ void AsyncResizeRequest::send_invalidate_cache() {
 }
 
 void AsyncResizeRequest::send_trim_image() {
+  assert(m_image_ctx.owner_lock.is_locked());
   ldout(m_image_ctx.cct, 5) << this << " send_trim_image: "
                             << " original_size=" << m_original_size
                             << " new_size=" << m_new_size << dendl;
@@ -187,109 +192,76 @@ void AsyncResizeRequest::send_trim_image() {
 }
 
 void AsyncResizeRequest::send_grow_object_map() {
-  bool lost_exclusive_lock = false;
-  bool object_map_enabled = true;
-  {
-    RWLock::RLocker l(m_image_ctx.owner_lock);
-    if (!m_image_ctx.object_map.enabled()) {
-      object_map_enabled = false;
-    } else { 
-      ldout(m_image_ctx.cct, 5) << this << " send_grow_object_map: "
-                                << " original_size=" << m_original_size
-                                << " new_size=" << m_new_size << dendl;
-      m_state = STATE_GROW_OBJECT_MAP;
-
-      if (m_image_ctx.image_watcher->is_lock_supported() &&
-         !m_image_ctx.image_watcher->is_lock_owner()) {
-       ldout(m_image_ctx.cct, 1) << "lost exclusive lock during grow object map" << dendl;
-       lost_exclusive_lock = true;
-      } else {
-       m_image_ctx.object_map.aio_resize(m_new_size, OBJECT_NONEXISTENT,
-                                          create_callback_context());
-       object_map_enabled = true;
-      }
-    }
-  }
-
-  // avoid possible recursive lock attempts
-  if (!object_map_enabled) {
+  assert(m_image_ctx.owner_lock.is_locked());
+  if (!m_image_ctx.object_map.enabled()) {
     send_update_header();
-  } else if (lost_exclusive_lock) {
-    complete(-ERESTART);
+    return;
   }
+
+  ldout(m_image_ctx.cct, 5) << this << " send_grow_object_map: "
+                            << " original_size=" << m_original_size
+                            << " new_size=" << m_new_size << dendl;
+  m_state = STATE_GROW_OBJECT_MAP;
+
+  // should have been canceled prior to releasing lock
+  assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+         m_image_ctx.image_watcher->is_lock_owner());
+
+  m_image_ctx.object_map.aio_resize(m_new_size, OBJECT_NONEXISTENT,
+                                   create_callback_context());
 }
 
 bool AsyncResizeRequest::send_shrink_object_map() {
-  bool lost_exclusive_lock = false;
-  {
-    RWLock::RLocker l(m_image_ctx.owner_lock);
-    if (!m_image_ctx.object_map.enabled() || m_new_size > m_original_size) {
-      return true;
-    }
-
-    ldout(m_image_ctx.cct, 5) << this << " send_shrink_object_map: "
-                             << " original_size=" << m_original_size
-                             << " new_size=" << m_new_size << dendl;
-    m_state = STATE_SHRINK_OBJECT_MAP;
-
-    if (m_image_ctx.image_watcher->is_lock_supported() &&
-        !m_image_ctx.image_watcher->is_lock_owner()) {
-      ldout(m_image_ctx.cct, 1) << "lost exclusive lock during shrink object map" << dendl;
-      lost_exclusive_lock = true;
-    } else {
-      m_image_ctx.object_map.aio_resize(m_new_size, OBJECT_NONEXISTENT,
-                                        create_callback_context());
-    }
+  assert(m_image_ctx.owner_lock.is_locked());
+  if (!m_image_ctx.object_map.enabled() || m_new_size > m_original_size) {
+    return true;
   }
 
-  // avoid possible recursive lock attempts
-  if (lost_exclusive_lock) {
-    complete(-ERESTART);
-  }
+  ldout(m_image_ctx.cct, 5) << this << " send_shrink_object_map: "
+                           << " original_size=" << m_original_size
+                           << " new_size=" << m_new_size << dendl;
+  m_state = STATE_SHRINK_OBJECT_MAP;
+
+  // should have been canceled prior to releasing lock
+  assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+         m_image_ctx.image_watcher->is_lock_owner());
+
+  m_image_ctx.object_map.aio_resize(m_new_size, OBJECT_NONEXISTENT,
+                                   create_callback_context());
   return false;
 }
 
 void AsyncResizeRequest::send_update_header() {
-  bool lost_exclusive_lock = false;
+  assert(m_image_ctx.owner_lock.is_locked());
 
   ldout(m_image_ctx.cct, 5) << this << " send_update_header: "
                             << " original_size=" << m_original_size
                             << " new_size=" << m_new_size << dendl;
   m_state = STATE_UPDATE_HEADER;
 
-  {
-    RWLock::RLocker l(m_image_ctx.owner_lock);
-    if (m_image_ctx.image_watcher->is_lock_supported() &&
-       !m_image_ctx.image_watcher->is_lock_owner()) {
-      ldout(m_image_ctx.cct, 1) << "lost exclusive lock during header update" << dendl;
-      lost_exclusive_lock = true;
-    } else {
-      librados::ObjectWriteOperation op;
-      if (m_image_ctx.old_format) {
-       // rewrite header
-       bufferlist bl;
-       m_image_ctx.header.image_size = m_new_size;
-       bl.append((const char *)&m_image_ctx.header, sizeof(m_image_ctx.header));
-       op.write(0, bl);
-      } else {
-       if (m_image_ctx.image_watcher->is_lock_supported()) {
-         m_image_ctx.image_watcher->assert_header_locked(&op);
-       }
-       cls_client::set_size(&op, m_new_size);
-      }
-
-      librados::AioCompletion *rados_completion = create_callback_completion();
-      int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
-                                            rados_completion, &op);
-      assert(r == 0);
-      rados_completion->release();
+  // should have been canceled prior to releasing lock
+  assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+         m_image_ctx.image_watcher->is_lock_owner());
+
+  librados::ObjectWriteOperation op;
+  if (m_image_ctx.old_format) {
+    // rewrite header
+    bufferlist bl;
+    m_image_ctx.header.image_size = m_new_size;
+    bl.append((const char *)&m_image_ctx.header, sizeof(m_image_ctx.header));
+    op.write(0, bl);
+  } else {
+    if (m_image_ctx.image_watcher->is_lock_supported()) {
+      m_image_ctx.image_watcher->assert_header_locked(&op);
     }
+    cls_client::set_size(&op, m_new_size);
   }
 
-  // avoid possible recursive lock attempts
-  if (lost_exclusive_lock) {
-    complete(-ERESTART);
-  }
+  librados::AioCompletion *rados_completion = create_callback_completion();
+  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
+                                    rados_completion, &op);
+  assert(r == 0);
+  rados_completion->release();
 }
 
 void AsyncResizeRequest::compute_parent_overlap() {
index a8bda30996b3c2546d9ea3a0ca68db37d93e023e..20f7102a69cc07abab9285d0fca232f9a08136cc 100644 (file)
@@ -33,16 +33,13 @@ public:
   }
 
   virtual int send() {
+    assert(m_image_ctx.owner_lock.is_locked());
+    assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+           m_image_ctx.image_watcher->is_lock_owner());
     if (!m_image_ctx.object_map.object_may_exist(m_object_no)) {
       return 1;
     }
 
-    RWLock::RLocker l(m_image_ctx.owner_lock);
-    if (m_image_ctx.image_watcher->is_lock_supported() &&
-        !m_image_ctx.image_watcher->is_lock_owner()) {
-      return -ERESTART;
-    }
-
     string oid = m_image_ctx.get_object_name(m_object_no);
     ldout(m_image_ctx.cct, 10) << "removing " << oid << dendl;
 
@@ -91,26 +88,29 @@ bool AsyncTrimRequest::should_complete(int r)
   switch (m_state) {
   case STATE_PRE_REMOVE:
     ldout(cct, 5) << " PRE_REMOVE" << dendl;
-    send_remove_objects();
-    break; 
+    {
+      RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+      send_remove_objects();
+    }
+    break;
 
   case STATE_REMOVE_OBJECTS:
     ldout(cct, 5) << " REMOVE_OBJECTS" << dendl;
-    if (send_post_remove()) {
-      return true;
-    }
+    send_post_remove();
     break;
 
   case STATE_POST_REMOVE:
     ldout(cct, 5) << " POST_OBJECTS" << dendl;
-    if (send_clean_boundary()) {
-      return true;
+    {
+      RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+      send_clean_boundary();
     }
     break;
 
   case STATE_CLEAN_BOUNDARY:
     ldout(cct, 5) << "CLEAN_BOUNDARY" << dendl;
-    return true;
+    finish();
+    break;
 
   case STATE_FINISHED:
     ldout(cct, 5) << "FINISHED" << dendl;
@@ -125,19 +125,18 @@ bool AsyncTrimRequest::should_complete(int r)
 }
 
 void AsyncTrimRequest::send() {
+  assert(m_image_ctx.owner_lock.is_locked());
   if (m_delete_start < m_num_objects) {
     send_pre_remove();
   } else {
-    bool finished = send_clean_boundary();
-    if (finished) {
-      m_state = STATE_FINISHED;
-      complete(0);
-    }
+    send_clean_boundary();
   }
 }
 
 void AsyncTrimRequest::send_remove_objects() {
+  assert(m_image_ctx.owner_lock.is_locked());
   CephContext *cct = m_image_ctx.cct;
+
   ldout(m_image_ctx.cct, 5) << this << " send_remove_objects: "
                            << " delete_start=" << m_delete_start
                            << " num_objects=" << m_num_objects << dendl;
@@ -154,10 +153,11 @@ void AsyncTrimRequest::send_remove_objects() {
 }
 
 void AsyncTrimRequest::send_pre_remove() {
+  assert(m_image_ctx.owner_lock.is_locked());
+
   bool remove_objects = false;
-  bool lost_exclusive_lock = false;
   {
-    RWLock::RLocker l(m_image_ctx.owner_lock);
+    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
     if (!m_image_ctx.object_map.enabled()) {
       remove_objects = true;
     } else {
@@ -166,20 +166,16 @@ void AsyncTrimRequest::send_pre_remove() {
                                << " num_objects=" << m_num_objects << dendl;
       m_state = STATE_PRE_REMOVE;
 
-      if (!m_image_ctx.image_watcher->is_lock_owner()) {
-        ldout(m_image_ctx.cct, 1) << "lost exclusive lock during trim" << dendl;
-        lost_exclusive_lock = true;
-      } else {
-        // flag the objects as pending deletion
-        Context *ctx = create_callback_context();
-        RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
-        RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
-        if (!m_image_ctx.object_map.aio_update(m_delete_start, m_num_objects,
-                                              OBJECT_PENDING, OBJECT_EXISTS,
-                                               ctx)) {
-          delete ctx;
-          remove_objects = true;
-        }
+      assert(m_image_ctx.image_watcher->is_lock_owner());
+
+      // flag the objects as pending deletion
+      Context *ctx = create_callback_context();
+      RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
+      if (!m_image_ctx.object_map.aio_update(m_delete_start, m_num_objects,
+                                            OBJECT_PENDING, OBJECT_EXISTS,
+                                             ctx)) {
+        delete ctx;
+        remove_objects = true;
       }
     }
   }
@@ -188,16 +184,15 @@ void AsyncTrimRequest::send_pre_remove() {
   if (remove_objects) {
     // no object map update required
     send_remove_objects();
-  } else if (lost_exclusive_lock) {
-    complete(-ERESTART);
   }
 }
 
-bool AsyncTrimRequest::send_post_remove() {
+void AsyncTrimRequest::send_post_remove() {
+  assert(m_image_ctx.owner_lock.is_locked());
+
   bool clean_boundary = false;
-  bool lost_exclusive_lock = false;
   {
-    RWLock::RLocker l(m_image_ctx.owner_lock);
+    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
     if (!m_image_ctx.object_map.enabled()) {
       clean_boundary = true;
     } else {
@@ -206,19 +201,16 @@ bool AsyncTrimRequest::send_post_remove() {
                                << " num_objects=" << m_num_objects << dendl;
       m_state = STATE_POST_REMOVE;
 
-      if (!m_image_ctx.image_watcher->is_lock_owner()) {
-        ldout(m_image_ctx.cct, 1) << "lost exclusive lock during trim" << dendl;
-      } else {
-        // flag the pending objects as removed
-        Context *ctx = create_callback_context();
-        RWLock::RLocker snap_lock(m_image_ctx.snap_lock);
-        RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
-        if (!m_image_ctx.object_map.aio_update(m_delete_start, m_num_objects,
-                                              OBJECT_NONEXISTENT,
-                                              OBJECT_PENDING, ctx)) {
-          delete ctx;
-         clean_boundary = true;
-       }
+      assert(m_image_ctx.image_watcher->is_lock_owner());
+
+      // flag the pending objects as removed
+      Context *ctx = create_callback_context();
+      RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
+      if (!m_image_ctx.object_map.aio_update(m_delete_start, m_num_objects,
+                                            OBJECT_NONEXISTENT,
+                                            OBJECT_PENDING, ctx)) {
+        delete ctx;
+       clean_boundary = true;
       }
     }
   }
@@ -226,72 +218,61 @@ bool AsyncTrimRequest::send_post_remove() {
   // avoid possible recursive lock attempts
   if (clean_boundary) {
     // no object map update required
-    return send_clean_boundary();
-  } else if (lost_exclusive_lock) {
-    complete(-ERESTART);
+    send_clean_boundary();
   }
-  return false;
 }
 
-bool AsyncTrimRequest::send_clean_boundary() {
+void AsyncTrimRequest::send_clean_boundary() {
+  assert(m_image_ctx.owner_lock.is_locked());
   CephContext *cct = m_image_ctx.cct;
   if (m_delete_off <= m_new_size) {
-    return true;
+    finish();
+    return;
   }
 
-  bool lost_exclusive_lock = false;
-  ContextCompletion *completion = NULL;
+  // should have been canceled prior to releasing lock
+  assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+         m_image_ctx.image_watcher->is_lock_owner());
+  ldout(m_image_ctx.cct, 5) << this << " send_clean_boundary: "
+                           << " delete_start=" << m_delete_start
+                           << " num_objects=" << m_num_objects << dendl;
+  m_state = STATE_CLEAN_BOUNDARY;
+
+  ::SnapContext snapc;
   {
-    ldout(m_image_ctx.cct, 5) << this << " send_clean_boundary: "
-                             << " delete_start=" << m_delete_start
-                             << " num_objects=" << m_num_objects << dendl;
-    m_state = STATE_CLEAN_BOUNDARY;
-
-    RWLock::RLocker l(m_image_ctx.owner_lock);
-    if (m_image_ctx.image_watcher->is_lock_supported() &&
-       !m_image_ctx.image_watcher->is_lock_owner()) {
-      ldout(m_image_ctx.cct, 1) << "lost exclusive lock during trim" << dendl;
-      lost_exclusive_lock = true;
-    } else {
-      ::SnapContext snapc;
-      {
-        RWLock::RLocker l2(m_image_ctx.snap_lock);
-        snapc = m_image_ctx.snapc;
-      }
+    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
+    snapc = m_image_ctx.snapc;
+  }
 
-      // discard the weird boundary, if any
-      vector<ObjectExtent> extents;
-      Striper::file_to_extents(cct, m_image_ctx.format_string,
-                              &m_image_ctx.layout, m_new_size,
-                              m_delete_off - m_new_size, 0, extents);
-
-      completion = new ContextCompletion(create_callback_context(), true);
-      for (vector<ObjectExtent>::iterator p = extents.begin();
-           p != extents.end(); ++p) {
-        ldout(cct, 20) << " ex " << *p << dendl;
-        Context *req_comp = new C_ContextCompletion(*completion);
-
-        AbstractWrite *req;
-        if (p->offset == 0) {
-          req = new AioRemove(&m_image_ctx, p->oid.name, p->objectno, snapc,
-                              req_comp);
-        } else {
-          req = new AioTruncate(&m_image_ctx, p->oid.name, p->objectno,
-                                p->offset, snapc, req_comp);
-        }
-        req->send();
-      }
+  // discard the weird boundary
+  std::vector<ObjectExtent> extents;
+  Striper::file_to_extents(cct, m_image_ctx.format_string,
+                          &m_image_ctx.layout, m_new_size,
+                          m_delete_off - m_new_size, 0, extents);
+
+  ContextCompletion *completion =
+    new ContextCompletion(create_callback_context(), true);
+  for (vector<ObjectExtent>::iterator p = extents.begin();
+       p != extents.end(); ++p) {
+    ldout(cct, 20) << " ex " << *p << dendl;
+    Context *req_comp = new C_ContextCompletion(*completion);
+
+    AbstractWrite *req;
+    if (p->offset == 0) {
+      req = new AioRemove(&m_image_ctx, p->oid.name, p->objectno, snapc,
+                          req_comp);
+    } else {
+      req = new AioTruncate(&m_image_ctx, p->oid.name, p->objectno,
+                            p->offset, snapc, req_comp);
     }
-
+    req->send();
   }
+  completion->finish_adding_requests();
+}
 
-  // avoid possible recursive lock attempts
-  if (lost_exclusive_lock) {
-    complete(-ERESTART);
-  } else if (completion != NULL) {
-    completion->finish_adding_requests();
-  }
-  return false;
+void AsyncTrimRequest::finish() {
+  m_state = STATE_FINISHED;
+  async_complete(0);
 }
 
 } // namespace librbd
index 7a89a119bb951093a97a9f223d8163a7505bd00d..d4d6af997a32d4c0c3de3bd0dbaaf220ef6a48dd 100644 (file)
@@ -68,8 +68,9 @@ private:
 
   void send_remove_objects();
   void send_pre_remove();
-  bool send_post_remove();
-  bool send_clean_boundary();
+  void send_post_remove();
+  void send_clean_boundary();
+  void finish();
 };
 
 } // namespace librbd