From: Josh Durgin
Date: Wed, 25 Feb 2015 01:19:59 +0000 (-0800)
Subject: librbd: use snap_lock to protect ImageCtx->size
X-Git-Tag: v0.93~6^2~6
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=85825008bc8af27b01c51e45d05c7cabe8929a84;p=ceph.git

librbd: use snap_lock to protect ImageCtx->size

Since this is often looked up by snap_id anyway, snap_lock is easy to
use for this. This lets us avoid taking md_lock in many places.

Signed-off-by: Josh Durgin
---

diff --git a/src/librbd/AsyncFlattenRequest.cc b/src/librbd/AsyncFlattenRequest.cc
index eacdc4bdb556..ac459cdd72c1 100644
--- a/src/librbd/AsyncFlattenRequest.cc
+++ b/src/librbd/AsyncFlattenRequest.cc
@@ -38,7 +38,7 @@ public:
       return -ERESTART;
     }

-    RWLock::RLocker l2(m_image_ctx.md_lock);
+    RWLock::RLocker l2(m_image_ctx.snap_lock);
     uint64_t overlap;
     {
       RWLock::RLocker l3(m_image_ctx.parent_lock);
diff --git a/src/librbd/AsyncResizeRequest.cc b/src/librbd/AsyncResizeRequest.cc
index 1b09aae1d5aa..3edc3324f97b 100644
--- a/src/librbd/AsyncResizeRequest.cc
+++ b/src/librbd/AsyncResizeRequest.cc
@@ -23,7 +23,7 @@ bool AsyncResizeRequest::should_complete(int r)
   if (r < 0) {
     lderr(cct) << "resize encountered an error: " << cpp_strerror(r) << dendl;

-    RWLock::WLocker l(m_image_ctx.md_lock);
+    RWLock::WLocker l(m_image_ctx.snap_lock);
     if (m_image_ctx.size == m_new_size) {
       m_image_ctx.size = m_original_size;
     }
@@ -96,7 +96,7 @@ void AsyncResizeRequest::send_trim_image() {

   {
     // update in-memory size to clip concurrent IO operations
-    RWLock::WLocker l(m_image_ctx.md_lock);
+    RWLock::WLocker l(m_image_ctx.snap_lock);
     m_image_ctx.size = m_new_size;

     RWLock::WLocker l2(m_image_ctx.parent_lock);
@@ -188,13 +188,14 @@ void AsyncResizeRequest::send_update_header() {

   {
     RWLock::RLocker l(m_image_ctx.owner_lock);
-    RWLock::WLocker l2(m_image_ctx.md_lock);
     if (m_image_ctx.image_watcher->is_lock_supported() &&
         !m_image_ctx.image_watcher->is_lock_owner()) {
       ldout(m_image_ctx.cct, 1) << "lost exclusive lock during header update" << dendl;
       lost_exclusive_lock = true;
     } else {
+      m_image_ctx.snap_lock.get_write();
       m_image_ctx.size = m_new_size;
+      m_image_ctx.snap_lock.put_write();

       librados::ObjectWriteOperation op;
       if (m_image_ctx.old_format) {
diff --git a/src/librbd/ImageCtx.cc b/src/librbd/ImageCtx.cc
index f58f7ffe2fcc..e76b1aa5d425 100644
--- a/src/librbd/ImageCtx.cc
+++ b/src/librbd/ImageCtx.cc
@@ -366,7 +366,7 @@ namespace librbd {

   uint64_t ImageCtx::get_current_size() const
   {
-    assert(md_lock.is_locked());
+    assert(snap_lock.is_locked());
     return size;
   }

@@ -436,7 +436,6 @@ namespace librbd {

   uint64_t ImageCtx::get_image_size(snap_t in_snap_id) const
   {
-    assert(md_lock.is_locked());
     assert(snap_lock.is_locked());
     if (in_snap_id == CEPH_NOSNAP) {
       return size;
diff --git a/src/librbd/ImageCtx.h b/src/librbd/ImageCtx.h
index 996309fe1434..cb1d64d737bb 100644
--- a/src/librbd/ImageCtx.h
+++ b/src/librbd/ImageCtx.h
@@ -76,7 +76,7 @@ namespace librbd {
     RWLock owner_lock; // protects exclusive lock leadership updates
     RWLock md_lock; // protects access to the mutable image metadata that
                     // isn't guarded by other locks below
-                    // (size, features, image locks, etc)
+                    // (flush_encountered, etc)
     Mutex cache_lock; // used as client_lock for the ObjectCacher
     RWLock snap_lock; // protects snapshot-related member variables, features, and flags
     RWLock parent_lock; // protects parent_md and parent
diff --git a/src/librbd/internal.cc b/src/librbd/internal.cc
index 3731e72282f9..447d297ef32c 100644
--- a/src/librbd/internal.cc
+++ b/src/librbd/internal.cc
@@ -124,11 +124,9 @@ namespace librbd {
   void image_info(ImageCtx *ictx, image_info_t& info, size_t infosize)
   {
     int obj_order = ictx->order;
-    ictx->md_lock.get_read();
     ictx->snap_lock.get_read();
     info.size = ictx->get_image_size(ictx->snap_id);
     ictx->snap_lock.put_read();
-    ictx->md_lock.put_read();
     info.obj_size = 1ULL << obj_order;
     info.num_objs = Striper::get_num_objects(ictx->layout, info.size);
     info.order = obj_order;
@@ -163,8 +161,10 @@ namespace librbd {
                    ictx->image_watcher->is_lock_owner());

     C_SaferCond *ctx = new C_SaferCond();
+    ictx->snap_lock.get_read();
     AsyncTrimRequest *req = new AsyncTrimRequest(*ictx, ctx, ictx->size,
                                                  newsize, prog_ctx);
+    ictx->snap_lock.put_read();
     req->send();

     int r = ctx->wait();
@@ -297,7 +297,7 @@ namespace librbd {
     uint64_t bsize = ictx->get_object_size();
     uint64_t numseg;
     {
-      RWLock::RLocker l(ictx->md_lock);
+      RWLock::RLocker l(ictx->snap_lock);
       numseg = Striper::get_num_objects(ictx->layout,
                                         ictx->get_current_size());
     }
@@ -1088,13 +1088,11 @@ reprotect_and_return_err:
       goto err_close_parent;
     }

-    p_imctx->md_lock.get_read();
     p_imctx->snap_lock.get_read();
     p_imctx->get_features(p_imctx->snap_id, &p_features);
     size = p_imctx->get_image_size(p_imctx->snap_id);
     p_imctx->is_snap_protected(p_imctx->snap_id, &snap_protected);
     p_imctx->snap_lock.put_read();
-    p_imctx->md_lock.put_read();

     if ((p_features & RBD_FEATURE_LAYERING) != RBD_FEATURE_LAYERING) {
       lderr(cct) << "parent image must support layering" << dendl;
@@ -1315,7 +1313,6 @@ reprotect_and_return_err:
     int r = ictx_check(ictx);
     if (r < 0)
       return r;
-    RWLock::RLocker l(ictx->md_lock);
     RWLock::RLocker l2(ictx->snap_lock);
     *size = ictx->get_image_size(ictx->snap_id);
     return 0;
@@ -1480,7 +1477,6 @@ reprotect_and_return_err:
       return r;
     }

-    RWLock::RLocker l(ictx->md_lock);
     RWLock::RLocker l2(ictx->snap_lock);
     return ictx->get_flags(ictx->snap_id, flags);
   }
@@ -1685,9 +1681,10 @@ reprotect_and_return_err:
     uint64_t original_size;
     {
       RWLock::RLocker l(ictx->md_lock);
+      ictx->snap_lock.get_read();
       original_size = ictx->size;
-      if (size < ictx->size) {
-        ictx->flush_async_operations();
+      ictx->snap_lock.put_read();
+      if (size < original_size) {
         if (ictx->object_cacher) {
           // need to invalidate since we're deleting objects, and
           // ObjectCacher doesn't track non-existent objects
@@ -2107,8 +2104,10 @@ reprotect_and_return_err:
       return -EROFS;
     }

+    ictx->snap_lock.get_read();
     original_size = ictx->size;
     new_size = ictx->get_image_size(snap_id);
+    ictx->snap_lock.put_read();

     // need to flush any pending writes before resizing and rolling back -
     // writes might create new snapshots. Rolling back will replace
@@ -2173,13 +2172,11 @@ reprotect_and_return_err:
            << " -> " << destname << dendl;

     int order = src->order;
-    src->md_lock.get_read();
     src->snap_lock.get_read();
     uint64_t src_features;
     src->get_features(src->snap_id, &src_features);
     uint64_t src_size = src->get_image_size(src->snap_id);
     src->snap_lock.put_read();
-    src->md_lock.put_read();

     int r = create(dest_md_ctx, destname, src_size, src->old_format,
                    src_features, &order, src->stripe_unit, src->stripe_count);
@@ -2256,17 +2253,13 @@ reprotect_and_return_err:

   int copy(ImageCtx *src, ImageCtx *dest, ProgressContext &prog_ctx)
   {
-    src->md_lock.get_read();
     src->snap_lock.get_read();
     uint64_t src_size = src->get_image_size(src->snap_id);
     src->snap_lock.put_read();
-    src->md_lock.put_read();

-    dest->md_lock.get_read();
     dest->snap_lock.get_read();
     uint64_t dest_size = dest->get_image_size(dest->snap_id);
     dest->snap_lock.put_read();
-    dest->md_lock.put_read();

     CephContext *cct = src->cct;
     if (dest_size < src_size) {
@@ -2507,9 +2500,8 @@ reprotect_and_return_err:

     ::SnapContext snapc;
     {
-      RWLock::RLocker l(ictx->md_lock);
-      RWLock::RLocker l2(ictx->snap_lock);
-      RWLock::RLocker l3(ictx->parent_lock);
+      RWLock::RLocker l(ictx->snap_lock);
+      RWLock::RLocker l2(ictx->parent_lock);

       // can't flatten a non-clone
       if (ictx->parent_md.spec.pool_id == -1) {
@@ -3129,12 +3121,10 @@ reprotect_and_return_err:
   // validate extent against image size; clip to image size if necessary
   int clip_io(ImageCtx *ictx, uint64_t off, uint64_t *len)
   {
-    ictx->md_lock.get_read();
     ictx->snap_lock.get_read();
     uint64_t image_size = ictx->get_image_size(ictx->snap_id);
     bool snap_exists = ictx->snap_exists;
     ictx->snap_lock.put_read();
-    ictx->md_lock.put_read();

     if (!snap_exists)
       return -ENOENT;
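
A minimal sketch (not part of the patch) of the caller-side pattern the hunks
above converge on, assuming the post-patch ImageCtx interface; the helper name
read_image_size() is hypothetical:

    #include "common/RWLock.h"
    #include "librbd/ImageCtx.h"

    // Readers of the in-memory image size now take snap_lock rather than
    // md_lock; after this change ImageCtx::get_image_size() asserts
    // snap_lock.is_locked() and returns ImageCtx::size for CEPH_NOSNAP or
    // the snapshot's size otherwise.
    uint64_t read_image_size(librbd::ImageCtx *ictx) {
      RWLock::RLocker l(ictx->snap_lock);
      return ictx->get_image_size(ictx->snap_id);
    }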