}
void AbstractWrite::send() {
+ assert(m_ictx->owner_lock.is_locked());
ldout(m_ictx->cct, 20) << "send " << this << " " << m_oid << " "
<< m_object_off << "~" << m_object_len << dendl;
+ send_pre();
+ }
- if (!send_pre()) {
+ void AbstractWrite::send_pre() {
+ assert(m_ictx->owner_lock.is_locked());
+ RWLock::RLocker snap_locker(m_ictx->snap_lock);
+ if (!m_ictx->object_map.enabled()) {
send_write();
+ return;
}
- }
- bool AbstractWrite::send_pre() {
- bool lost_exclusive_lock = false;
- {
- RWLock::RLocker l(m_ictx->owner_lock);
- if (!m_ictx->object_map.enabled()) {
- return false;
- }
+ // should have been flushed prior to releasing lock
+ assert(m_ictx->image_watcher->is_lock_owner());
- if (!m_ictx->image_watcher->is_lock_owner()) {
- ldout(m_ictx->cct, 1) << "lost exclusive lock during write" << dendl;
- lost_exclusive_lock = true;
- } else {
- ldout(m_ictx->cct, 20) << "send_pre " << this << " " << m_oid << " "
- << m_object_off << "~" << m_object_len << dendl;
-
- uint8_t new_state;
- boost::optional<uint8_t> current_state;
- pre_object_map_update(&new_state);
-
- m_state = LIBRBD_AIO_WRITE_PRE;
- FunctionContext *ctx = new FunctionContext(
- boost::bind(&AioRequest::complete, this, _1));
- RWLock::RLocker snap_locker(m_ictx->snap_lock);
- RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
- if (!m_ictx->object_map.aio_update(m_object_no, new_state,
- current_state, ctx)) {
- // no object map update required
- delete ctx;
- return false;
- }
- }
- }
+ ldout(m_ictx->cct, 20) << "send_pre " << this << " " << m_oid << " "
+ << m_object_off << "~" << m_object_len << dendl;
+ m_state = LIBRBD_AIO_WRITE_PRE;
- if (lost_exclusive_lock) {
- complete(-ERESTART);
+ uint8_t new_state;
+ boost::optional<uint8_t> current_state;
+ pre_object_map_update(&new_state);
+
+ RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
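+ // no update needed if the object map already reflects the new state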
+ if (m_ictx->object_map[m_object_no] == new_state) {
+ send_write();
+ return;
}
- return true;
+
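+ // the object map update completion re-invokes complete() to advance the write state machine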
+ FunctionContext *ctx = new FunctionContext(
+ boost::bind(&AioRequest::complete, this, _1));
+ bool updated = m_ictx->object_map.aio_update(m_object_no, new_state,
+ current_state, ctx);
+ assert(updated);
}
bool AbstractWrite::send_post() {
- ldout(m_ictx->cct, 20) << "send_post " << this << " " << m_oid << " "
- << m_object_off << "~" << m_object_len << dendl;
-
- RWLock::RLocker l(m_ictx->owner_lock);
+ RWLock::RLocker owner_locker(m_ictx->owner_lock);
+ RWLock::RLocker snap_locker(m_ictx->snap_lock);
if (!m_ictx->object_map.enabled() || !post_object_map_update()) {
return true;
}
- if (m_ictx->image_watcher->is_lock_supported() &&
- !m_ictx->image_watcher->is_lock_owner()) {
- // leave the object flagged as pending
- ldout(m_ictx->cct, 1) << "lost exclusive lock during write" << dendl;
- return true;
- }
+ // should have been flushed prior to releasing lock
+ assert(m_ictx->image_watcher->is_lock_owner());
+ ldout(m_ictx->cct, 20) << "send_post " << this << " " << m_oid << " "
+ << m_object_off << "~" << m_object_len << dendl;
m_state = LIBRBD_AIO_WRITE_POST;
- FunctionContext *ctx = new FunctionContext(
- boost::bind(&AioRequest::complete, this, _1));
- RWLock::RLocker snap_locker(m_ictx->snap_lock);
+
RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
- if (!m_ictx->object_map.aio_update(m_object_no, OBJECT_NONEXISTENT,
- OBJECT_PENDING, ctx)) {
- // no object map update required
- delete ctx;
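+ // only an object left in the PENDING state by send_pre() needs the post-write update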
+ uint8_t current_state = m_ictx->object_map[m_object_no];
+ if (current_state != OBJECT_PENDING) {
return true;
}
+
+ FunctionContext *ctx = new FunctionContext(
+ boost::bind(&AioRequest::complete, this, _1));
+ bool updated = m_ictx->object_map.aio_update(m_object_no,
+ OBJECT_NONEXISTENT,
+ OBJECT_PENDING, ctx);
+ assert(updated);
return false;
}
}
private:
- bool send_pre();
+ void send_pre();
bool send_post();
void send_write();
void send_copyup();
#include "librbd/ObjectMap.h"
#include "common/dout.h"
#include "common/errno.h"
#include <boost/lambda/bind.hpp>
#include <boost/lambda/construct.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::AsyncFlattenRequest: "
namespace librbd {
}
virtual int send() {
+ assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
- RWLock::RLocker l(m_image_ctx.owner_lock);
if (m_image_ctx.image_watcher->is_lock_supported() &&
!m_image_ctx.image_watcher->is_lock_owner()) {
ldout(cct, 1) << "lost exclusive lock during flatten" << dendl;
}
void AsyncFlattenRequest::send() {
+ assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " send" << dendl;
}
bool AsyncFlattenRequest::send_update_header() {
+ assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
- bool lost_exclusive_lock = false;
+ ldout(cct, 5) << this << " send_update_header" << dendl;
m_state = STATE_UPDATE_HEADER;
- {
- RWLock::RLocker l(m_image_ctx.owner_lock);
- if (m_image_ctx.image_watcher->is_lock_supported() &&
- !m_image_ctx.image_watcher->is_lock_owner()) {
- ldout(cct, 1) << "lost exclusive lock during header update" << dendl;
- lost_exclusive_lock = true;
- } else {
- ldout(cct, 5) << this << " send_update_header" << dendl;
- RWLock::RLocker l2(m_image_ctx.parent_lock);
- // stop early if the parent went away - it just means
- // another flatten finished first, so this one is useless.
- if (!m_image_ctx.parent) {
- ldout(cct, 5) << "image already flattened" << dendl;
- return true;
- }
- m_ignore_enoent = true;
- m_parent_spec = m_image_ctx.parent_md.spec;
-
- // remove parent from this (base) image
- librados::ObjectWriteOperation op;
- if (m_image_ctx.image_watcher->is_lock_supported()) {
- m_image_ctx.image_watcher->assert_header_locked(&op);
- }
- cls_client::remove_parent(&op);
-
- librados::AioCompletion *rados_completion = create_callback_completion();
- int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
- rados_completion, &op);
- assert(r == 0);
- rados_completion->release();
+ // should have been canceled prior to releasing lock
+ assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+ m_image_ctx.image_watcher->is_lock_owner());
+
+ {
+ RWLock::RLocker parent_locker(m_image_ctx.parent_lock);
+ // stop early if the parent went away - it just means
+ // another flatten finished first, so this one is useless.
+ if (!m_image_ctx.parent) {
+ ldout(cct, 5) << "image already flattened" << dendl;
+ return true;
}
+ m_parent_spec = m_image_ctx.parent_md.spec;
}
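+ // tolerate -ENOENT in case the parent reference was already removed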
+ m_ignore_enoent = true;
- if (lost_exclusive_lock) {
- complete(-ERESTART);
+ // remove parent from this (base) image
+ librados::ObjectWriteOperation op;
+ if (m_image_ctx.image_watcher->is_lock_supported()) {
+ m_image_ctx.image_watcher->assert_header_locked(&op);
}
+ cls_client::remove_parent(&op);
+
+ librados::AioCompletion *rados_completion = create_callback_completion();
+ int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
+ rados_completion, &op);
+ assert(r == 0);
+ rados_completion->release();
return false;
}
bool AsyncFlattenRequest::send_update_children() {
CephContext *cct = m_image_ctx.cct;
- bool lost_exclusive_lock = false;
- m_state = STATE_UPDATE_CHILDREN;
- {
- RWLock::RLocker l(m_image_ctx.owner_lock);
- if (m_image_ctx.image_watcher->is_lock_supported() &&
- !m_image_ctx.image_watcher->is_lock_owner()) {
- ldout(cct, 1) << "lost exclusive lock during children update" << dendl;
- lost_exclusive_lock = true;
- } else {
- // if there are no snaps, remove from the children object as well
- // (if snapshots remain, they have their own parent info, and the child
- // will be removed when the last snap goes away)
- RWLock::RLocker l2(m_image_ctx.snap_lock);
- if (!m_image_ctx.snaps.empty()) {
- return true;
- }
-
- ldout(cct, 2) << "removing child from children list..." << dendl;
- librados::ObjectWriteOperation op;
- cls_client::remove_child(&op, m_parent_spec, m_image_ctx.id);
-
- librados::AioCompletion *rados_completion = create_callback_completion();
- int r = m_image_ctx.md_ctx.aio_operate(RBD_CHILDREN, rados_completion,
- &op);
- assert(r == 0);
- rados_completion->release();
- }
- }
+ RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
- if (lost_exclusive_lock) {
- complete(-ERESTART);
+ // should have been canceled prior to releasing lock
+ assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+ m_image_ctx.image_watcher->is_lock_owner());
+
+ // if there are no snaps, remove from the children object as well
+ // (if snapshots remain, they have their own parent info, and the child
+ // will be removed when the last snap goes away)
+ RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
+ if (!m_image_ctx.snaps.empty()) {
+ return true;
}
+
+ ldout(cct, 2) << "removing child from children list..." << dendl;
+ m_state = STATE_UPDATE_CHILDREN;
+
+ librados::ObjectWriteOperation op;
+ cls_client::remove_child(&op, m_parent_spec, m_image_ctx.id);
+
+ librados::AioCompletion *rados_completion = create_callback_completion();
+ int r = m_image_ctx.md_ctx.aio_operate(RBD_CHILDREN, rados_completion,
+ &op);
+ assert(r == 0);
+ rados_completion->release();
return false;
}
}
if (next_req != NULL) {
+ RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
next_req->send();
}
}
lderr(cct) << "resize encountered an error: " << cpp_strerror(r) << dendl;
return true;
}
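+ // no need to acquire the owner lock once the request has finished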
+ if (m_state == STATE_FINISHED) {
+ ldout(cct, 5) << "FINISHED" << dendl;
+ return true;
+ }
+ RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
switch (m_state) {
case STATE_FLUSH:
ldout(cct, 5) << "FLUSH" << dendl;
increment_refresh_seq();
return true;
- case STATE_FINISHED:
- ldout(cct, 5) << "FINISHED" << dendl;
- return true;
-
default:
lderr(cct) << "invalid state: " << m_state << dendl;
assert(false);
}
void AsyncResizeRequest::send() {
+ assert(m_image_ctx.owner_lock.is_locked());
{
RWLock::RLocker l(m_image_ctx.snap_lock);
assert(!m_image_ctx.async_resize_reqs.empty());
}
void AsyncResizeRequest::send_invalidate_cache() {
+ assert(m_image_ctx.owner_lock.is_locked());
ldout(m_image_ctx.cct, 5) << this << " send_invalidate_cache: "
<< " original_size=" << m_original_size
<< " new_size=" << m_new_size << dendl;
}
void AsyncResizeRequest::send_trim_image() {
+ assert(m_image_ctx.owner_lock.is_locked());
ldout(m_image_ctx.cct, 5) << this << " send_trim_image: "
<< " original_size=" << m_original_size
<< " new_size=" << m_new_size << dendl;
}
void AsyncResizeRequest::send_grow_object_map() {
- bool lost_exclusive_lock = false;
- bool object_map_enabled = true;
- {
- RWLock::RLocker l(m_image_ctx.owner_lock);
- if (!m_image_ctx.object_map.enabled()) {
- object_map_enabled = false;
- } else {
- ldout(m_image_ctx.cct, 5) << this << " send_grow_object_map: "
- << " original_size=" << m_original_size
- << " new_size=" << m_new_size << dendl;
- m_state = STATE_GROW_OBJECT_MAP;
-
- if (m_image_ctx.image_watcher->is_lock_supported() &&
- !m_image_ctx.image_watcher->is_lock_owner()) {
- ldout(m_image_ctx.cct, 1) << "lost exclusive lock during grow object map" << dendl;
- lost_exclusive_lock = true;
- } else {
- m_image_ctx.object_map.aio_resize(m_new_size, OBJECT_NONEXISTENT,
- create_callback_context());
- object_map_enabled = true;
- }
- }
- }
-
- // avoid possible recursive lock attempts
- if (!object_map_enabled) {
+ assert(m_image_ctx.owner_lock.is_locked());
+ if (!m_image_ctx.object_map.enabled()) {
send_update_header();
- } else if (lost_exclusive_lock) {
- complete(-ERESTART);
+ return;
}
+
+ ldout(m_image_ctx.cct, 5) << this << " send_grow_object_map: "
+ << " original_size=" << m_original_size
+ << " new_size=" << m_new_size << dendl;
+ m_state = STATE_GROW_OBJECT_MAP;
+
+ // should have been canceled prior to releasing lock
+ assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+ m_image_ctx.image_watcher->is_lock_owner());
+
+ m_image_ctx.object_map.aio_resize(m_new_size, OBJECT_NONEXISTENT,
+ create_callback_context());
}
bool AsyncResizeRequest::send_shrink_object_map() {
- bool lost_exclusive_lock = false;
- {
- RWLock::RLocker l(m_image_ctx.owner_lock);
- if (!m_image_ctx.object_map.enabled() || m_new_size > m_original_size) {
- return true;
- }
-
- ldout(m_image_ctx.cct, 5) << this << " send_shrink_object_map: "
- << " original_size=" << m_original_size
- << " new_size=" << m_new_size << dendl;
- m_state = STATE_SHRINK_OBJECT_MAP;
-
- if (m_image_ctx.image_watcher->is_lock_supported() &&
- !m_image_ctx.image_watcher->is_lock_owner()) {
- ldout(m_image_ctx.cct, 1) << "lost exclusive lock during shrink object map" << dendl;
- lost_exclusive_lock = true;
- } else {
- m_image_ctx.object_map.aio_resize(m_new_size, OBJECT_NONEXISTENT,
- create_callback_context());
- }
+ assert(m_image_ctx.owner_lock.is_locked());
+ if (!m_image_ctx.object_map.enabled() || m_new_size > m_original_size) {
+ return true;
}
- // avoid possible recursive lock attempts
- if (lost_exclusive_lock) {
- complete(-ERESTART);
- }
+ ldout(m_image_ctx.cct, 5) << this << " send_shrink_object_map: "
+ << " original_size=" << m_original_size
+ << " new_size=" << m_new_size << dendl;
+ m_state = STATE_SHRINK_OBJECT_MAP;
+
+ // should have been canceled prior to releasing lock
+ assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+ m_image_ctx.image_watcher->is_lock_owner());
+
+ m_image_ctx.object_map.aio_resize(m_new_size, OBJECT_NONEXISTENT,
+ create_callback_context());
return false;
}
void AsyncResizeRequest::send_update_header() {
- bool lost_exclusive_lock = false;
+ assert(m_image_ctx.owner_lock.is_locked());
ldout(m_image_ctx.cct, 5) << this << " send_update_header: "
<< " original_size=" << m_original_size
<< " new_size=" << m_new_size << dendl;
m_state = STATE_UPDATE_HEADER;
- {
- RWLock::RLocker l(m_image_ctx.owner_lock);
- if (m_image_ctx.image_watcher->is_lock_supported() &&
- !m_image_ctx.image_watcher->is_lock_owner()) {
- ldout(m_image_ctx.cct, 1) << "lost exclusive lock during header update" << dendl;
- lost_exclusive_lock = true;
- } else {
- librados::ObjectWriteOperation op;
- if (m_image_ctx.old_format) {
- // rewrite header
- bufferlist bl;
- m_image_ctx.header.image_size = m_new_size;
- bl.append((const char *)&m_image_ctx.header, sizeof(m_image_ctx.header));
- op.write(0, bl);
- } else {
- if (m_image_ctx.image_watcher->is_lock_supported()) {
- m_image_ctx.image_watcher->assert_header_locked(&op);
- }
- cls_client::set_size(&op, m_new_size);
- }
-
- librados::AioCompletion *rados_completion = create_callback_completion();
- int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
- rados_completion, &op);
- assert(r == 0);
- rados_completion->release();
+ // should have been canceled prior to releasing lock
+ assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+ m_image_ctx.image_watcher->is_lock_owner());
+
+ librados::ObjectWriteOperation op;
+ if (m_image_ctx.old_format) {
+ // rewrite header
+ bufferlist bl;
+ m_image_ctx.header.image_size = m_new_size;
+ bl.append((const char *)&m_image_ctx.header, sizeof(m_image_ctx.header));
+ op.write(0, bl);
+ } else {
+ if (m_image_ctx.image_watcher->is_lock_supported()) {
+ m_image_ctx.image_watcher->assert_header_locked(&op);
}
+ cls_client::set_size(&op, m_new_size);
}
- // avoid possible recursive lock attempts
- if (lost_exclusive_lock) {
- complete(-ERESTART);
- }
+ librados::AioCompletion *rados_completion = create_callback_completion();
+ int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid,
+ rados_completion, &op);
+ assert(r == 0);
+ rados_completion->release();
}
void AsyncResizeRequest::compute_parent_overlap() {
}
virtual int send() {
+ assert(m_image_ctx.owner_lock.is_locked());
+ assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+ m_image_ctx.image_watcher->is_lock_owner());
if (!m_image_ctx.object_map.object_may_exist(m_object_no)) {
return 1;
}
- RWLock::RLocker l(m_image_ctx.owner_lock);
- if (m_image_ctx.image_watcher->is_lock_supported() &&
- !m_image_ctx.image_watcher->is_lock_owner()) {
- return -ERESTART;
- }
-
string oid = m_image_ctx.get_object_name(m_object_no);
ldout(m_image_ctx.cct, 10) << "removing " << oid << dendl;
switch (m_state) {
case STATE_PRE_REMOVE:
ldout(cct, 5) << " PRE_REMOVE" << dendl;
- send_remove_objects();
- break;
+ {
+ RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ send_remove_objects();
+ }
+ break;
case STATE_REMOVE_OBJECTS:
ldout(cct, 5) << " REMOVE_OBJECTS" << dendl;
- if (send_post_remove()) {
- return true;
- }
+ {
+   RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+   send_post_remove();
+ }
break;
case STATE_POST_REMOVE:
ldout(cct, 5) << " POST_OBJECTS" << dendl;
- if (send_clean_boundary()) {
- return true;
+ {
+ RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
+ send_clean_boundary();
}
break;
case STATE_CLEAN_BOUNDARY:
ldout(cct, 5) << "CLEAN_BOUNDARY" << dendl;
- return true;
+ finish();
+ break;
case STATE_FINISHED:
ldout(cct, 5) << "FINISHED" << dendl;
}
void AsyncTrimRequest::send() {
+ assert(m_image_ctx.owner_lock.is_locked());
if (m_delete_start < m_num_objects) {
send_pre_remove();
} else {
- bool finished = send_clean_boundary();
- if (finished) {
- m_state = STATE_FINISHED;
- complete(0);
- }
+ send_clean_boundary();
}
}
void AsyncTrimRequest::send_remove_objects() {
+ assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
+
ldout(m_image_ctx.cct, 5) << this << " send_remove_objects: "
<< " delete_start=" << m_delete_start
<< " num_objects=" << m_num_objects << dendl;
}
void AsyncTrimRequest::send_pre_remove() {
+ assert(m_image_ctx.owner_lock.is_locked());
+
bool remove_objects = false;
- bool lost_exclusive_lock = false;
{
- RWLock::RLocker l(m_image_ctx.owner_lock);
+ RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
if (!m_image_ctx.object_map.enabled()) {
remove_objects = true;
} else {
<< " num_objects=" << m_num_objects << dendl;
m_state = STATE_PRE_REMOVE;
- if (!m_image_ctx.image_watcher->is_lock_owner()) {
- ldout(m_image_ctx.cct, 1) << "lost exclusive lock during trim" << dendl;
- lost_exclusive_lock = true;
- } else {
- // flag the objects as pending deletion
- Context *ctx = create_callback_context();
- RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
- RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
- if (!m_image_ctx.object_map.aio_update(m_delete_start, m_num_objects,
- OBJECT_PENDING, OBJECT_EXISTS,
- ctx)) {
- delete ctx;
- remove_objects = true;
- }
+ assert(m_image_ctx.image_watcher->is_lock_owner());
+
+ // flag the objects as pending deletion
+ Context *ctx = create_callback_context();
+ RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
+ if (!m_image_ctx.object_map.aio_update(m_delete_start, m_num_objects,
+ OBJECT_PENDING, OBJECT_EXISTS,
+ ctx)) {
+ delete ctx;
+ remove_objects = true;
}
}
}
if (remove_objects) {
// no object map update required
send_remove_objects();
- } else if (lost_exclusive_lock) {
- complete(-ERESTART);
}
}
-bool AsyncTrimRequest::send_post_remove() {
+void AsyncTrimRequest::send_post_remove() {
+ assert(m_image_ctx.owner_lock.is_locked());
+
bool clean_boundary = false;
- bool lost_exclusive_lock = false;
{
- RWLock::RLocker l(m_image_ctx.owner_lock);
+ RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
if (!m_image_ctx.object_map.enabled()) {
clean_boundary = true;
} else {
<< " num_objects=" << m_num_objects << dendl;
m_state = STATE_POST_REMOVE;
- if (!m_image_ctx.image_watcher->is_lock_owner()) {
- ldout(m_image_ctx.cct, 1) << "lost exclusive lock during trim" << dendl;
- } else {
- // flag the pending objects as removed
- Context *ctx = create_callback_context();
- RWLock::RLocker snap_lock(m_image_ctx.snap_lock);
- RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
- if (!m_image_ctx.object_map.aio_update(m_delete_start, m_num_objects,
- OBJECT_NONEXISTENT,
- OBJECT_PENDING, ctx)) {
- delete ctx;
- clean_boundary = true;
- }
+ assert(m_image_ctx.image_watcher->is_lock_owner());
+
+ // flag the pending objects as removed
+ Context *ctx = create_callback_context();
+ RWLock::WLocker object_map_locker(m_image_ctx.object_map_lock);
+ if (!m_image_ctx.object_map.aio_update(m_delete_start, m_num_objects,
+ OBJECT_NONEXISTENT,
+ OBJECT_PENDING, ctx)) {
+ delete ctx;
+ clean_boundary = true;
}
}
}
// avoid possible recursive lock attempts
if (clean_boundary) {
// no object map update required
- return send_clean_boundary();
- } else if (lost_exclusive_lock) {
- complete(-ERESTART);
+ send_clean_boundary();
}
- return false;
}
-bool AsyncTrimRequest::send_clean_boundary() {
+void AsyncTrimRequest::send_clean_boundary() {
+ assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
if (m_delete_off <= m_new_size) {
- return true;
+ finish();
+ return;
}
- bool lost_exclusive_lock = false;
- ContextCompletion *completion = NULL;
+ // should have been canceled prior to releasing lock
+ assert(!m_image_ctx.image_watcher->is_lock_supported() ||
+ m_image_ctx.image_watcher->is_lock_owner());
+ ldout(m_image_ctx.cct, 5) << this << " send_clean_boundary: "
+ << " delete_start=" << m_delete_start
+ << " num_objects=" << m_num_objects << dendl;
+ m_state = STATE_CLEAN_BOUNDARY;
+
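+ // capture a consistent snap context for the boundary object requests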
+ ::SnapContext snapc;
{
- ldout(m_image_ctx.cct, 5) << this << " send_clean_boundary: "
- << " delete_start=" << m_delete_start
- << " num_objects=" << m_num_objects << dendl;
- m_state = STATE_CLEAN_BOUNDARY;
-
- RWLock::RLocker l(m_image_ctx.owner_lock);
- if (m_image_ctx.image_watcher->is_lock_supported() &&
- !m_image_ctx.image_watcher->is_lock_owner()) {
- ldout(m_image_ctx.cct, 1) << "lost exclusive lock during trim" << dendl;
- lost_exclusive_lock = true;
- } else {
- ::SnapContext snapc;
- {
- RWLock::RLocker l2(m_image_ctx.snap_lock);
- snapc = m_image_ctx.snapc;
- }
+ RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
+ snapc = m_image_ctx.snapc;
+ }
- // discard the weird boundary, if any
- vector<ObjectExtent> extents;
- Striper::file_to_extents(cct, m_image_ctx.format_string,
- &m_image_ctx.layout, m_new_size,
- m_delete_off - m_new_size, 0, extents);
-
- completion = new ContextCompletion(create_callback_context(), true);
- for (vector<ObjectExtent>::iterator p = extents.begin();
- p != extents.end(); ++p) {
- ldout(cct, 20) << " ex " << *p << dendl;
- Context *req_comp = new C_ContextCompletion(*completion);
-
- AbstractWrite *req;
- if (p->offset == 0) {
- req = new AioRemove(&m_image_ctx, p->oid.name, p->objectno, snapc,
- req_comp);
- } else {
- req = new AioTruncate(&m_image_ctx, p->oid.name, p->objectno,
- p->offset, snapc, req_comp);
- }
- req->send();
- }
+ // discard the weird boundary
+ std::vector<ObjectExtent> extents;
+ Striper::file_to_extents(cct, m_image_ctx.format_string,
+ &m_image_ctx.layout, m_new_size,
+ m_delete_off - m_new_size, 0, extents);
+
+ ContextCompletion *completion =
+ new ContextCompletion(create_callback_context(), true);
+ for (std::vector<ObjectExtent>::iterator p = extents.begin();
+ p != extents.end(); ++p) {
+ ldout(cct, 20) << " ex " << *p << dendl;
+ Context *req_comp = new C_ContextCompletion(*completion);
+
+ AbstractWrite *req;
+ if (p->offset == 0) {
+ req = new AioRemove(&m_image_ctx, p->oid.name, p->objectno, snapc,
+ req_comp);
+ } else {
+ req = new AioTruncate(&m_image_ctx, p->oid.name, p->objectno,
+ p->offset, snapc, req_comp);
}
-
+ req->send();
}
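+ // all boundary requests are queued; allow the completion to fire when they finish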
+ completion->finish_adding_requests();
+}
- // avoid possible recursive lock attempts
- if (lost_exclusive_lock) {
- complete(-ERESTART);
- } else if (completion != NULL) {
- completion->finish_adding_requests();
- }
- return false;
+void AsyncTrimRequest::finish() {
+ m_state = STATE_FINISHED;
+ async_complete(0);
}
} // namespace librbd
void send_remove_objects();
void send_pre_remove();
- bool send_post_remove();
- bool send_clean_boundary();
+ void send_post_remove();
+ void send_clean_boundary();
+ void finish();
};
} // namespace librbd