From: Jason Dillaman
Date: Mon, 2 May 2016 13:27:29 +0000 (-0400)
Subject: librbd: avoid applying refreshed image config within librados callback
X-Git-Tag: v10.2.1~27^2~2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=e653a1541ca7df29d01e8524a3f3b597fa0fb67b;p=ceph.git

librbd: avoid applying refreshed image config within librados callback

There is a potential that a synchronous API call could deadlock an image
refresh.

Signed-off-by: Jason Dillaman
(cherry picked from commit ce5c701bc47b0959f8453b6b92dee4804d3b1d75)
---

diff --git a/src/librbd/image/RefreshRequest.cc b/src/librbd/image/RefreshRequest.cc
index 289cf5a2844c9..b499b0c86953c 100644
--- a/src/librbd/image/RefreshRequest.cc
+++ b/src/librbd/image/RefreshRequest.cc
@@ -187,8 +187,28 @@ Context *RefreshRequest<I>::handle_v1_get_locks(int *result) {
     return m_on_finish;
   }
 
-  apply();
+  send_v1_apply();
+  return nullptr;
+}
+
+template <typename I>
+void RefreshRequest<I>::send_v1_apply() {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << this << " " << __func__ << dendl;
+
+  // ensure we are not in a rados callback when applying updates
+  using klass = RefreshRequest<I>;
+  Context *ctx = create_context_callback<
+    klass, &klass::handle_v1_apply>(this);
+  m_image_ctx.op_work_queue->queue(ctx, 0);
+}
+
+template <typename I>
+Context *RefreshRequest<I>::handle_v1_apply(int *result) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << this << " " << __func__ << dendl;
+  apply();
 
   return send_flush_aio();
 }
 
@@ -305,17 +325,19 @@ Context *RefreshRequest<I>::handle_v2_get_flags(int *result) {
     return m_on_finish;
   }
 
-  return send_v2_get_snapshots();
+  send_v2_get_snapshots();
+  return nullptr;
 }
 
 template <typename I>
-Context *RefreshRequest<I>::send_v2_get_snapshots() {
+void RefreshRequest<I>::send_v2_get_snapshots() {
   if (m_snapc.snaps.empty()) {
     m_snap_names.clear();
     m_snap_sizes.clear();
     m_snap_parents.clear();
     m_snap_protection.clear();
-    return send_v2_refresh_parent();
+    send_v2_refresh_parent();
+    return;
   }
 
   CephContext *cct = m_image_ctx.cct;
@@ -332,7 +354,6 @@ Context *RefreshRequest<I>::send_v2_get_snapshots() {
                                       &m_out_bl);
   assert(r == 0);
   comp->release();
-  return nullptr;
 }
 
 template <typename I>
@@ -358,11 +379,12 @@ Context *RefreshRequest<I>::handle_v2_get_snapshots(int *result) {
     return m_on_finish;
   }
 
-  return send_v2_refresh_parent();
+  send_v2_refresh_parent();
+  return nullptr;
 }
 
 template <typename I>
-Context *RefreshRequest<I>::send_v2_refresh_parent() {
+void RefreshRequest<I>::send_v2_refresh_parent() {
   {
     RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
     RWLock::RLocker parent_locker(m_image_ctx.parent_lock);
@@ -384,9 +406,8 @@ Context *RefreshRequest<I>::send_v2_refresh_parent() {
 
   if (m_refresh_parent != nullptr) {
     m_refresh_parent->send();
-    return nullptr;
   } else {
-    return send_v2_init_exclusive_lock();
+    send_v2_init_exclusive_lock();
   }
 }
 
@@ -399,18 +420,21 @@ Context *RefreshRequest<I>::handle_v2_refresh_parent(int *result) {
     lderr(cct) << "failed to refresh parent image: " << cpp_strerror(*result)
                << dendl;
     save_result(result);
-    return send_v2_finalize_refresh_parent();
+    send_v2_apply();
+    return nullptr;
   }
 
-  return send_v2_init_exclusive_lock();
+  send_v2_init_exclusive_lock();
+  return nullptr;
 }
 
 template <typename I>
-Context *RefreshRequest<I>::send_v2_init_exclusive_lock() {
+void RefreshRequest<I>::send_v2_init_exclusive_lock() {
   if ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0 ||
       m_image_ctx.read_only || !m_image_ctx.snap_name.empty() ||
       m_image_ctx.exclusive_lock != nullptr) {
-    return send_v2_open_object_map();
+    send_v2_open_object_map();
+    return;
   }
 
   // implies exclusive lock dynamically enabled or image open in-progress
@@ -426,7 +450,6 @@ Context *RefreshRequest<I>::send_v2_init_exclusive_lock() {
 
   RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
   m_exclusive_lock->init(m_features, ctx);
-  return nullptr;
 }
 
 template <typename I>
@@ -442,11 +465,12 @@ Context *RefreshRequest<I>::handle_v2_init_exclusive_lock(int *result) {
 
   // object map and journal will be opened when exclusive lock is
   // acquired (if features are enabled)
-  return send_v2_finalize_refresh_parent();
+  send_v2_apply();
+  return nullptr;
 }
 
 template <typename I>
-Context *RefreshRequest<I>::send_v2_open_journal() {
+void RefreshRequest<I>::send_v2_open_journal() {
   if ((m_features & RBD_FEATURE_JOURNALING) == 0 ||
       m_image_ctx.read_only ||
       !m_image_ctx.snap_name.empty() ||
@@ -460,7 +484,8 @@ Context *RefreshRequest<I>::send_v2_open_journal() {
         m_image_ctx.journal == nullptr) {
       m_image_ctx.aio_work_queue->set_require_lock_on_read();
     }
-    return send_v2_block_writes();
+    send_v2_block_writes();
+    return;
   }
 
   // implies journal dynamically enabled since ExclusiveLock will init
@@ -475,7 +500,6 @@ Context *RefreshRequest<I>::send_v2_open_journal() {
   // TODO need safe close
   m_journal = m_image_ctx.create_journal();
   m_journal->open(ctx);
-  return nullptr;
 }
 
 template <typename I>
@@ -489,11 +513,12 @@ Context *RefreshRequest<I>::handle_v2_open_journal(int *result) {
     save_result(result);
   }
 
-  return send_v2_block_writes();
+  send_v2_block_writes();
+  return nullptr;
 }
 
 template <typename I>
-Context *RefreshRequest<I>::send_v2_block_writes() {
+void RefreshRequest<I>::send_v2_block_writes() {
   bool disabled_journaling = false;
   {
     RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
@@ -503,7 +528,8 @@ Context *RefreshRequest<I>::send_v2_block_writes() {
   }
 
   if (!disabled_journaling) {
-    return send_v2_finalize_refresh_parent();
+    send_v2_apply();
+    return;
   }
 
   CephContext *cct = m_image_ctx.cct;
@@ -517,7 +543,6 @@ Context *RefreshRequest<I>::send_v2_block_writes() {
 
   RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
   m_image_ctx.aio_work_queue->block_writes(ctx);
-  return nullptr;
 }
 
 template <typename I>
@@ -530,18 +555,20 @@ Context *RefreshRequest<I>::handle_v2_block_writes(int *result) {
                << dendl;
     save_result(result);
   }
-  return send_v2_finalize_refresh_parent();
+  send_v2_apply();
+  return nullptr;
 }
 
 template <typename I>
-Context *RefreshRequest<I>::send_v2_open_object_map() {
+void RefreshRequest<I>::send_v2_open_object_map() {
   if ((m_features & RBD_FEATURE_OBJECT_MAP) == 0 ||
       m_image_ctx.object_map != nullptr ||
       (m_image_ctx.snap_name.empty() &&
       (m_image_ctx.read_only ||
        m_image_ctx.exclusive_lock == nullptr ||
        !m_image_ctx.exclusive_lock->is_lock_owner()))) {
-    return send_v2_open_journal();
+    send_v2_open_journal();
+    return;
   }
 
   // implies object map dynamically enabled or image open in-progress
@@ -564,7 +591,8 @@ Context *RefreshRequest<I>::send_v2_open_object_map() {
     if (m_object_map == nullptr) {
       lderr(cct) << "failed to locate snapshot: " << m_image_ctx.snap_name
                  << dendl;
-      return send_v2_open_journal();
+      send_v2_open_journal();
+      return;
     }
   }
 
@@ -572,7 +600,6 @@ Context *RefreshRequest<I>::send_v2_open_object_map() {
   Context *ctx = create_context_callback<
     klass, &klass::handle_v2_open_object_map>(this);
   m_object_map->open(ctx);
-  return nullptr;
 }
 
 template <typename I>
@@ -581,13 +608,33 @@ Context *RefreshRequest<I>::handle_v2_open_object_map(int *result) {
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
 
   assert(*result == 0);
-  return send_v2_open_journal();
+  send_v2_open_journal();
+  return nullptr;
 }
 
 template <typename I>
-Context *RefreshRequest<I>::send_v2_finalize_refresh_parent() {
+void RefreshRequest<I>::send_v2_apply() {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << this << " " << __func__ << dendl;
+
+  // ensure we are not in a rados callback when applying updates
+  using klass = RefreshRequest<I>;
+  Context *ctx = create_context_callback<
+    klass, &klass::handle_v2_apply>(this);
+  m_image_ctx.op_work_queue->queue(ctx, 0);
+}
+
+template <typename I>
+Context *RefreshRequest<I>::handle_v2_apply(int *result) {
+  CephContext *cct = m_image_ctx.cct;
+  ldout(cct, 10) << this << " " << __func__ << dendl;
+  apply();
+  return send_v2_finalize_refresh_parent();
+}
+template <typename I>
+Context *RefreshRequest<I>::send_v2_finalize_refresh_parent() {
   if (m_refresh_parent == nullptr) {
     return send_v2_shut_down_exclusive_lock();
   }
 
diff --git a/src/librbd/image/RefreshRequest.h b/src/librbd/image/RefreshRequest.h
index fac07fbca54d6..63c858b9141f9 100644
--- a/src/librbd/image/RefreshRequest.h
+++ b/src/librbd/image/RefreshRequest.h
@@ -139,30 +139,36 @@ private:
   void send_v1_get_locks();
   Context *handle_v1_get_locks(int *result);
 
+  void send_v1_apply();
+  Context *handle_v1_apply(int *result);
+
   void send_v2_get_mutable_metadata();
   Context *handle_v2_get_mutable_metadata(int *result);
 
   void send_v2_get_flags();
   Context *handle_v2_get_flags(int *result);
 
-  Context *send_v2_get_snapshots();
+  void send_v2_get_snapshots();
   Context *handle_v2_get_snapshots(int *result);
 
-  Context *send_v2_refresh_parent();
+  void send_v2_refresh_parent();
   Context *handle_v2_refresh_parent(int *result);
 
-  Context *send_v2_init_exclusive_lock();
+  void send_v2_init_exclusive_lock();
   Context *handle_v2_init_exclusive_lock(int *result);
 
-  Context *send_v2_open_journal();
+  void send_v2_open_journal();
   Context *handle_v2_open_journal(int *result);
 
-  Context *send_v2_block_writes();
+  void send_v2_block_writes();
   Context *handle_v2_block_writes(int *result);
 
-  Context *send_v2_open_object_map();
+  void send_v2_open_object_map();
   Context *handle_v2_open_object_map(int *result);
 
+  void send_v2_apply();
+  Context *handle_v2_apply(int *result);
+
   Context *send_v2_finalize_refresh_parent();
   Context *handle_v2_finalize_refresh_parent(int *result);
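
The heart of the change is the pair of new send_v1_apply()/send_v2_apply() steps:
instead of mutating image state directly on the librados completion thread, the
state machine queues a context on m_image_ctx.op_work_queue and runs apply() from
that worker thread, so a callback thread never takes the image locks that a blocked
synchronous API caller may depend on. The same bounce-to-a-work-queue pattern can be
shown standalone; the sketch below is illustrative only (WorkQueue, apply_updates
and the fake completion thread are stand-ins, not librbd's actual ContextWQ /
create_context_callback machinery):

#include <condition_variable>
#include <functional>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>

// Tiny stand-in for librbd's op_work_queue: runs queued closures on a
// dedicated worker thread, never on the caller's thread.
class WorkQueue {
public:
  WorkQueue() : m_worker([this] { run(); }) {}

  ~WorkQueue() {
    {
      std::lock_guard<std::mutex> lock(m_mutex);
      m_stop = true;
    }
    m_cond.notify_one();
    m_worker.join();  // drains any queued items before stopping
  }

  void queue(std::function<void()> ctx) {
    {
      std::lock_guard<std::mutex> lock(m_mutex);
      m_items.push(std::move(ctx));
    }
    m_cond.notify_one();
  }

private:
  void run() {
    std::unique_lock<std::mutex> lock(m_mutex);
    for (;;) {
      m_cond.wait(lock, [this] { return m_stop || !m_items.empty(); });
      if (m_items.empty()) {
        return;  // stopped and fully drained
      }
      std::function<void()> item = std::move(m_items.front());
      m_items.pop();
      lock.unlock();
      item();  // run the context outside the queue lock
      lock.lock();
    }
  }

  std::mutex m_mutex;
  std::condition_variable m_cond;
  std::queue<std::function<void()>> m_items;
  bool m_stop = false;
  std::thread m_worker;  // declared last so the other members exist first
};

// Stand-in for RefreshRequest<I>::apply(): takes the image locks and swaps in
// the refreshed snapshot/parent/feature state.
void apply_updates() {
  std::cout << "applying refreshed state on thread "
            << std::this_thread::get_id() << std::endl;
}

int main() {
  WorkQueue op_work_queue;

  // Pretend librados invokes this when the refresh I/O completes.  The fix:
  // queue the apply step instead of calling apply_updates() inline here.
  std::thread rados_callback([&op_work_queue] {
    std::cout << "rados completion on thread "
              << std::this_thread::get_id() << std::endl;
    op_work_queue.queue(apply_updates);
  });
  rados_callback.join();
  return 0;  // ~WorkQueue drains the queue, so apply_updates() always runs
}

The key property is the one the in-tree comment ("ensure we are not in a rados
callback when applying updates") asks for: queue() returns immediately, so the
completion thread is released before apply() ever takes a lock.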