From: Jason Dillaman
Date: Thu, 25 Aug 2016 14:42:36 +0000 (-0400)
Subject: librbd: remove use of owner_lock on IO path
X-Git-Tag: v11.0.1~363^2~8
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=551d7eae126e4ec9d856726f9dcef97da8dd2b03;p=ceph.git

librbd: remove use of owner_lock on IO path

IO is fully flushed before releasing the exclusive lock so holding
the owner_lock isn't required.

Signed-off-by: Jason Dillaman
---

diff --git a/src/librbd/AioImageRequest.cc b/src/librbd/AioImageRequest.cc
index 010283ff4016c..aa34753e15b68 100644
--- a/src/librbd/AioImageRequest.cc
+++ b/src/librbd/AioImageRequest.cc
@@ -189,7 +189,6 @@ void AioImageRequest<I>::aio_flush(I *ictx, AioCompletion *c) {
 template <typename I>
 void AioImageRequest<I>::send() {
   I &image_ctx = this->m_image_ctx;
-  assert(image_ctx.owner_lock.is_locked());
   assert(m_aio_comp->is_initialized(get_aio_type()));
   assert(m_aio_comp->is_started() ^ (get_aio_type() == AIO_TYPE_FLUSH));
 
diff --git a/src/librbd/AioImageRequestWQ.cc b/src/librbd/AioImageRequestWQ.cc
index f8c08f0c90828..08cdadb244001 100644
--- a/src/librbd/AioImageRequestWQ.cc
+++ b/src/librbd/AioImageRequestWQ.cc
@@ -351,10 +351,7 @@ void AioImageRequestWQ::process(AioImageRequest<> *req) {
   ldout(cct, 20) << __func__ << ": ictx=" << &m_image_ctx << ", "
                  << "req=" << req << dendl;
 
-  {
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-    req->send();
-  }
+  req->send();
 
   finish_queued_op(req);
   if (req->is_write_op()) {
@@ -388,7 +385,6 @@ void AioImageRequestWQ::finish_in_progress_write() {
   }
 
   if (writes_blocked) {
-    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
     m_image_ctx.flush(new C_BlockedWrites(this));
   }
 }
@@ -409,19 +405,20 @@ int AioImageRequestWQ::start_in_flight_op(AioCompletion *c) {
 }
 
 void AioImageRequestWQ::finish_in_flight_op() {
+  Context *on_shutdown;
   {
     RWLock::RLocker locker(m_lock);
     if (m_in_flight_ops.dec() > 0 || !m_shutdown) {
       return;
     }
+    on_shutdown = m_on_shutdown;
   }
 
   CephContext *cct = m_image_ctx.cct;
   ldout(cct, 5) << __func__ << ": completing shut down" << dendl;
 
-  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
-  assert(m_on_shutdown != nullptr);
-  m_image_ctx.flush(m_on_shutdown);
+  assert(on_shutdown != nullptr);
+  m_image_ctx.flush(on_shutdown);
 }
 
 bool AioImageRequestWQ::is_lock_required() const {
diff --git a/src/librbd/AioObjectRequest.cc b/src/librbd/AioObjectRequest.cc
index 8335c607f7fe8..5a5f1e0742203 100644
--- a/src/librbd/AioObjectRequest.cc
+++ b/src/librbd/AioObjectRequest.cc
@@ -128,7 +128,6 @@ bool AioObjectRequest::compute_parent_extents() {
 }
 
 static inline bool is_copy_on_read(ImageCtx *ictx, librados::snap_t snap_id) {
-  assert(ictx->owner_lock.is_locked());
   assert(ictx->snap_lock.is_locked());
   return (ictx->clone_copy_on_read &&
          !ictx->read_only && snap_id == CEPH_NOSNAP &&
@@ -184,7 +183,6 @@ bool AioObjectRead::should_complete(int r)
   // This is the step to read from parent
   if (!m_tried_parent && r == -ENOENT) {
     {
-      RWLock::RLocker owner_locker(image_ctx->owner_lock);
       RWLock::RLocker snap_locker(image_ctx->snap_lock);
       RWLock::RLocker parent_locker(image_ctx->parent_lock);
       if (image_ctx->parent == NULL) {
@@ -291,7 +289,6 @@ void AioObjectRead::send_copyup() {
   ImageCtx *image_ctx = this->m_ictx;
 
   {
-    RWLock::RLocker owner_locker(image_ctx->owner_lock);
     RWLock::RLocker snap_locker(image_ctx->snap_lock);
     RWLock::RLocker parent_locker(image_ctx->parent_lock);
     if (!this->compute_parent_extents() ||
@@ -325,7 +322,6 @@ void AioObjectRead::read_from_parent(const Extents& parent_extents)
                            << " parent completion " << parent_completion
completion " << parent_completion << " extents " << parent_extents << dendl; - RWLock::RLocker owner_locker(image_ctx->parent->owner_lock); AioImageRequest<>::aio_read(image_ctx->parent, parent_completion, parent_extents, NULL, &m_read_data, 0); } @@ -428,7 +424,6 @@ bool AbstractAioObjectWrite::should_complete(int r) } void AbstractAioObjectWrite::send() { - assert(m_ictx->owner_lock.is_locked()); ldout(m_ictx->cct, 20) << "send " << get_write_type() << " " << this <<" " << m_oid << " " << m_object_off << "~" << m_object_len << dendl; @@ -436,8 +431,6 @@ void AbstractAioObjectWrite::send() { } void AbstractAioObjectWrite::send_pre() { - assert(m_ictx->owner_lock.is_locked()); - bool write = false; { RWLock::RLocker snap_lock(m_ictx->snap_lock); @@ -477,7 +470,6 @@ void AbstractAioObjectWrite::send_pre() { } bool AbstractAioObjectWrite::send_post() { - RWLock::RLocker owner_locker(m_ictx->owner_lock); RWLock::RLocker snap_locker(m_ictx->snap_lock); if (m_ictx->object_map == nullptr || !post_object_map_update()) { return true; diff --git a/src/librbd/CopyupRequest.cc b/src/librbd/CopyupRequest.cc index b95544b28494a..56524d3c86fc6 100644 --- a/src/librbd/CopyupRequest.cc +++ b/src/librbd/CopyupRequest.cc @@ -39,7 +39,6 @@ public: } virtual int send() { - assert(m_image_ctx.owner_lock.is_locked()); uint64_t snap_id = m_snap_ids[m_snap_id_idx]; if (snap_id == CEPH_NOSNAP) { RWLock::RLocker snap_locker(m_image_ctx.snap_lock); @@ -189,7 +188,6 @@ void CopyupRequest::send() << ", oid " << m_oid << ", extents " << m_image_extents << dendl; - RWLock::RLocker owner_locker(m_ictx->parent->owner_lock); AioImageRequest<>::aio_read(m_ictx->parent, comp, m_image_extents, NULL, &m_copyup_data, 0); } @@ -260,7 +258,6 @@ void CopyupRequest::remove_from_list() bool CopyupRequest::send_object_map() { { - RWLock::RLocker owner_locker(m_ictx->owner_lock); RWLock::RLocker snap_locker(m_ictx->snap_lock); if (m_ictx->object_map != nullptr) { bool copy_on_read = m_pending_requests.empty(); diff --git a/src/librbd/ImageCtx.cc b/src/librbd/ImageCtx.cc index 748d11a2aa53c..e91030f093d0d 100644 --- a/src/librbd/ImageCtx.cc +++ b/src/librbd/ImageCtx.cc @@ -87,7 +87,6 @@ struct C_FlushCache : public Context { } virtual void finish(int r) { // successful cache flush indicates all IO is now safe - assert(image_ctx->owner_lock.is_locked()); image_ctx->flush_cache(on_safe); } }; @@ -734,7 +733,6 @@ struct C_InvalidateCache : public Context { } void ImageCtx::flush_cache(Context *onfinish) { - assert(owner_lock.is_locked()); cache_lock.Lock(); object_cacher->flush_set(object_set, onfinish); cache_lock.Unlock(); @@ -746,7 +744,6 @@ struct C_InvalidateCache : public Context { return; } - RWLock::RLocker owner_locker(owner_lock); cache_lock.Lock(); object_cacher->release_set(object_set); cache_lock.Unlock(); @@ -849,7 +846,6 @@ struct C_InvalidateCache : public Context { // ensure no locks are held when flush is complete on_safe = util::create_async_context_callback(*this, on_safe); - assert(owner_lock.is_locked()); if (object_cacher != NULL) { // flush cache after completing all in-flight AIO ops on_safe = new C_FlushCache(this, on_safe); diff --git a/src/librbd/Journal.cc b/src/librbd/Journal.cc index f87f97ca6c374..a82e84578c389 100644 --- a/src/librbd/Journal.cc +++ b/src/librbd/Journal.cc @@ -789,8 +789,6 @@ uint64_t Journal::append_write_event(uint64_t offset, size_t length, const bufferlist &bl, const AioObjectRequests &requests, bool flush_entry) { - assert(m_image_ctx.owner_lock.is_locked()); - assert(m_max_append_size 
   assert(m_max_append_size > journal::AioWriteEvent::get_fixed_size());
   uint64_t max_write_data_size =
     m_max_append_size - journal::AioWriteEvent::get_fixed_size();
@@ -824,8 +822,6 @@ uint64_t Journal<I>::append_io_event(journal::EventEntry &&event_entry,
                                      const AioObjectRequests &requests,
                                      uint64_t offset, size_t length,
                                      bool flush_entry) {
-  assert(m_image_ctx.owner_lock.is_locked());
-
   bufferlist bl;
   ::encode(event_entry, bl);
   return append_io_events(event_entry.get_event_type(), {bl}, requests, offset,
@@ -838,7 +834,6 @@ uint64_t Journal<I>::append_io_events(journal::EventType event_type,
                                       const AioObjectRequests &requests,
                                       uint64_t offset, size_t length,
                                       bool flush_entry) {
-  assert(m_image_ctx.owner_lock.is_locked());
   assert(!bufferlists.empty());
 
   Futures futures;
@@ -1587,7 +1582,6 @@ void Journal<I>::handle_io_event_safe(int r, uint64_t tid) {
       (*it)->complete(r);
     } else {
       // send any waiting aio requests now that journal entry is safe
-      RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
       (*it)->send();
     }
   }
diff --git a/src/librbd/LibrbdWriteback.cc b/src/librbd/LibrbdWriteback.cc
index 977b0b3163d75..3b56ed282424e 100644
--- a/src/librbd/LibrbdWriteback.cc
+++ b/src/librbd/LibrbdWriteback.cc
@@ -50,15 +50,12 @@ namespace librbd {
    */
   class C_ReadRequest : public Context {
   public:
-    C_ReadRequest(CephContext *cct, Context *c, RWLock *owner_lock,
-                  Mutex *cache_lock)
-      : m_cct(cct), m_ctx(c), m_owner_lock(owner_lock),
-        m_cache_lock(cache_lock) {
+    C_ReadRequest(CephContext *cct, Context *c, Mutex *cache_lock)
+      : m_cct(cct), m_ctx(c), m_cache_lock(cache_lock) {
     }
     virtual void finish(int r) {
       ldout(m_cct, 20) << "aio_cb completing " << dendl;
       {
-        RWLock::RLocker owner_locker(*m_owner_lock);
         Mutex::Locker cache_locker(*m_cache_lock);
         m_ctx->complete(r);
       }
@@ -67,7 +64,6 @@ namespace librbd {
   private:
     CephContext *m_cct;
     Context *m_ctx;
-    RWLock *m_owner_lock;
     Mutex *m_cache_lock;
   };
 
@@ -157,7 +153,6 @@ namespace librbd {
       ldout(cct, 20) << this << " C_WriteJournalCommit: "
                      << "journal committed: sending write request" << dendl;
 
-      RWLock::RLocker owner_locker(image_ctx->owner_lock);
       assert(image_ctx->exclusive_lock->is_lock_owner());
 
       request_sent = true;
@@ -199,8 +194,7 @@ namespace librbd {
                              __u32 trunc_seq, int op_flags, Context *onfinish)
   {
     // on completion, take the mutex and then call onfinish.
-    Context *req = new C_ReadRequest(m_ictx->cct, onfinish, &m_ictx->owner_lock,
-                                     &m_lock);
+    Context *req = new C_ReadRequest(m_ictx->cct, onfinish, &m_lock);
 
     {
       RWLock::RLocker snap_locker(m_ictx->snap_lock);
@@ -257,7 +251,6 @@ namespace librbd {
                                 __u32 trunc_seq, ceph_tid_t journal_tid,
                                 Context *oncommit)
   {
-    assert(m_ictx->owner_lock.is_locked());
     uint64_t object_no = oid_to_object_no(oid.name, m_ictx->object_prefix);
 
     write_result_d *result = new write_result_d(oid.name, oncommit);
@@ -292,7 +285,6 @@ namespace librbd {
                    << "journal_tid=" << original_journal_tid << ", "
                    << "new_journal_tid=" << new_journal_tid << dendl;
 
-    assert(m_ictx->owner_lock.is_locked());
     uint64_t object_no = oid_to_object_no(oid.name, m_ictx->object_prefix);
 
     // all IO operations are flushed prior to closing the journal
@@ -317,14 +309,6 @@ namespace librbd {
     }
   }
 
-  void LibrbdWriteback::get_client_lock() {
-    m_ictx->owner_lock.get_read();
-  }
-
-  void LibrbdWriteback::put_client_lock() {
-    m_ictx->owner_lock.put_read();
-  }
-
   void LibrbdWriteback::complete_writes(const std::string& oid) {
     assert(m_lock.is_locked());
 
diff --git a/src/librbd/LibrbdWriteback.h b/src/librbd/LibrbdWriteback.h
index a7dc05f9c1343..771757b64dc37 100644
--- a/src/librbd/LibrbdWriteback.h
+++ b/src/librbd/LibrbdWriteback.h
@@ -44,9 +44,6 @@ namespace librbd {
                                  uint64_t len, ceph_tid_t original_journal_tid,
                                  ceph_tid_t new_journal_tid);
 
-    virtual void get_client_lock();
-    virtual void put_client_lock();
-
     struct write_result_d {
       bool done;
       int ret;
diff --git a/src/librbd/ObjectMap.cc b/src/librbd/ObjectMap.cc
index b5d659ef8a2e8..e3c63c2e33471 100644
--- a/src/librbd/ObjectMap.cc
+++ b/src/librbd/ObjectMap.cc
@@ -202,7 +202,6 @@ bool ObjectMap::aio_update(uint64_t start_object_no, uint64_t end_object_no,
 {
   assert(m_image_ctx.snap_lock.is_locked());
   assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
-  assert(m_image_ctx.owner_lock.is_locked());
   assert(m_image_ctx.image_watcher != NULL);
   assert(m_image_ctx.exclusive_lock == nullptr ||
          m_image_ctx.exclusive_lock->is_lock_owner());
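
The AioImageRequestWQ::finish_in_flight_op() hunk above illustrates the core pattern of
the change: instead of flushing while owner_lock is held, the shutdown callback is copied
out under the work-queue lock and the flush is issued with no lock held, relying on the
flush itself to synchronize outstanding IO. Below is a minimal standalone sketch of that
pattern; the ShutdownTracker type and the std::mutex/std::atomic/std::function stand-ins
are illustrative assumptions, not librbd's actual RWLock/atomic_t/Context types.

// Sketch only: capture the callback under the lock, invoke the flush outside it.
#include <atomic>
#include <functional>
#include <iostream>
#include <mutex>

struct ShutdownTracker {
  std::mutex lock;                          // stands in for m_lock
  std::atomic<int> in_flight_ops{0};        // stands in for m_in_flight_ops
  bool shutdown = false;                    // stands in for m_shutdown
  std::function<void()> on_shutdown;        // stands in for m_on_shutdown

  void finish_in_flight_op(
      const std::function<void(std::function<void()>)> &flush) {
    std::function<void()> callback;
    {
      std::lock_guard<std::mutex> locker(lock);
      if (--in_flight_ops > 0 || !shutdown) {
        return;
      }
      // copy the completion callback while the queue lock is held ...
      callback = on_shutdown;
    }
    // ... and issue the flush after the lock is dropped, so no other
    // lock (owner_lock in librbd's case) is held across the flush
    flush(callback);
  }
};

int main() {
  ShutdownTracker tracker;
  tracker.shutdown = true;
  tracker.in_flight_ops = 1;
  tracker.on_shutdown = [] { std::cout << "shut down complete" << std::endl; };
  tracker.finish_in_flight_op([](std::function<void()> on_safe) {
    // a real implementation would flush outstanding IO before calling back
    on_safe();
  });
  return 0;
}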