complete = (m_current_ops == 0);
}
if (complete) {
- m_ctx->complete(m_ret);
+ // avoid re-entrant callback
+ m_image_ctx.op_work_queue->queue(m_ctx, m_ret);
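+ // safe to free the throttle here; the queued context carries the final result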
delete this;
}
}
template <typename T>
void AsyncObjectThrottle<T>::finish_op(int r) {
- assert(m_image_ctx.owner_lock.is_locked());
bool complete;
{
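+ // take owner_lock here instead of requiring the caller to hold it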
+ RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
Mutex::Locker locker(m_lock);
--m_current_ops;
if (r < 0 && r != -ENOENT && m_ret == 0) {
ImageCtxT &m_image_ctx;
virtual void finish(int r) {
- RWLock::RLocker locker(m_image_ctx.owner_lock);
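+ // owner_lock is now acquired inside finish_op()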
m_finisher.finish_op(r);
}
namespace librbd {
-class AsyncFlattenObjectContext : public C_AsyncObjectThrottle<> {
+class C_FlattenObject : public C_AsyncObjectThrottle<> {
public:
- AsyncFlattenObjectContext(AsyncObjectThrottle<> &throttle,
- ImageCtx *image_ctx, uint64_t object_size,
- ::SnapContext snapc, uint64_t object_no)
+ C_FlattenObject(AsyncObjectThrottle<> &throttle, ImageCtx *image_ctx,
+ uint64_t object_size, ::SnapContext snapc, uint64_t object_no)
: C_AsyncObjectThrottle(throttle, *image_ctx), m_object_size(object_size),
m_snapc(snapc), m_object_no(object_no)
{
return true;
}
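+ // acquire owner_lock once up front; the individual state handlers no longer take it themselves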
+ RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
switch (m_state) {
case STATE_FLATTEN_OBJECTS:
ldout(cct, 5) << "FLATTEN_OBJECTS" << dendl;
m_state = STATE_FLATTEN_OBJECTS;
AsyncObjectThrottle<>::ContextFactory context_factory(
- boost::lambda::bind(boost::lambda::new_ptr<AsyncFlattenObjectContext>(),
+ boost::lambda::bind(boost::lambda::new_ptr<C_FlattenObject>(),
boost::lambda::_1, &m_image_ctx, m_object_size, m_snapc,
boost::lambda::_2));
AsyncObjectThrottle<> *throttle = new AsyncObjectThrottle<>(
}
bool FlattenRequest::send_update_children() {
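+ // the caller (should_complete) now holds owner_lock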
+ assert(m_image_ctx.owner_lock.is_locked());
CephContext *cct = m_image_ctx.cct;
- RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-
// should have been canceled prior to releasing lock
assert(!m_image_ctx.image_watcher->is_lock_supported() ||
m_image_ctx.image_watcher->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " should_complete: " << " r=" << r << dendl;
+ RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
switch (m_state) {
case STATE_RESIZE_OBJECT_MAP:
ldout(cct, 5) << "RESIZE_OBJECT_MAP" << dendl;
if (r == -ESTALE && !m_attempted_trim) {
// objects are still flagged as in-use -- delete them
m_attempted_trim = true;
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
send_trim_image();
return false;
} else if (r == 0) {
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
send_verify_objects();
}
break;
case STATE_TRIM_IMAGE:
ldout(cct, 5) << "TRIM_IMAGE" << dendl;
if (r == 0) {
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
send_resize_object_map();
}
break;
case STATE_VERIFY_OBJECTS:
ldout(cct, 5) << "VERIFY_OBJECTS" << dendl;
if (r == 0) {
- assert(m_image_ctx.owner_lock.is_locked());
send_save_object_map();
}
break;
case STATE_SAVE_OBJECT_MAP:
ldout(cct, 5) << "SAVE_OBJECT_MAP" << dendl;
if (r == 0) {
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
send_update_header();
}
break;
return true;
}
+ RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
switch (m_state) {
case STATE_COPYUP_OBJECTS:
ldout(cct, 5) << " COPYUP_OBJECTS" << dendl;
case STATE_PRE_REMOVE:
ldout(cct, 5) << " PRE_REMOVE" << dendl;
- {
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
- send_remove_objects();
- }
+ send_remove_objects();
break;
case STATE_REMOVE_OBJECTS:
case STATE_POST_REMOVE:
ldout(cct, 5) << " POST_OBJECTS" << dendl;
- {
- RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
- send_clean_boundary();
- }
+ send_clean_boundary();
break;
case STATE_CLEAN_BOUNDARY: