From 9ab2400109cf00e99f05830f1a8ac2b997c547af Mon Sep 17 00:00:00 2001
From: Sage Weil <sage@redhat.com>
Date: Tue, 2 Jan 2018 16:42:36 -0600
Subject: [PATCH] osd: use atomic for pg_map_size

This avoids the need for pg_map_lock in the max pg check.

Signed-off-by: Sage Weil <sage@redhat.com>
---
 src/osd/OSD.cc | 15 ++++++++-------
 src/osd/OSD.h  |  1 +
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc
index 67a17acc7c6ac..261173c7cb82c 100644
--- a/src/osd/OSD.cc
+++ b/src/osd/OSD.cc
@@ -2119,10 +2119,7 @@ bool OSD::asok_command(std::string_view admin_command, const cmdmap_t& cmdmap,
     f->dump_string("state", get_state_name(get_state()));
     f->dump_unsigned("oldest_map", superblock.oldest_map);
     f->dump_unsigned("newest_map", superblock.newest_map);
-    {
-      RWLock::RLocker l(pg_map_lock);
-      f->dump_unsigned("num_pgs", pg_map.size());
-    }
+    f->dump_unsigned("num_pgs", pg_map_size);
     f->close_section();
   } else if (admin_command == "flush_journal") {
     store->flush_journal();
@@ -3802,6 +3799,7 @@ PGRef OSD::_open_pg(
   {
     RWLock::WLocker l(pg_map_lock);
     pg_map[pgid] = pg;
+    pg_map_size = pg_map.size();
     pg->get("PGMap");  // because it's in pg_map
     service.pg_add_epoch(pg->pg_id, createmap->get_epoch());
     service.init_splits_between(pgid, createmap, servicemap);
@@ -3909,6 +3907,7 @@ PGRef OSD::_lookup_pg(spg_t pgid)
       return pg;
     }
     pg_map.erase(p);
+    pg_map_size = pg_map.size();
     pg->put("PGMap");
   }
   return nullptr;
 }
@@ -4199,10 +4198,10 @@ bool OSD::maybe_wait_for_max_pg(spg_t pgid, bool is_mon_create)
     (cct->_conf->get_val<uint64_t>("mon_max_pg_per_osd") *
      cct->_conf->get_val<double>("osd_max_pg_per_osd_hard_ratio"));
 
-  RWLock::RLocker pg_map_locker{pg_map_lock};
-  if (pg_map.size() < max_pgs_per_osd) {
+  if (pg_map_size < max_pgs_per_osd) {
     return false;
   }
+
   [[gnu::unused]] auto&& pending_creates_locker = guardedly_lock(pending_creates_lock);
   if (is_mon_create) {
     pending_creates_from_mon++;
@@ -4211,7 +4210,7 @@ bool OSD::maybe_wait_for_max_pg(spg_t pgid, bool is_mon_create)
     pending_creates_from_osd.emplace(pgid.pgid, is_primary);
   }
   dout(1) << __func__ << " withhold creation of pg " << pgid
-          << ": " << pg_map.size() << " >= "<< max_pgs_per_osd << dendl;
+          << ": " << pg_map_size << " >= "<< max_pgs_per_osd << dendl;
   return true;
 }
 
@@ -7938,6 +7937,7 @@ void OSD::_finish_splits(set<PGRef>& pgs)
     pg_map_lock.get_write();
     pg->get("PGMap");  // For pg_map
     pg_map[pg->get_pgid()] = pg;
+    pg_map_size = pg_map.size();
     service.complete_split(pg->get_pgid());
     service.pg_add_epoch(pg->pg_id, e);
     pg_map_lock.put_write();
@@ -9068,6 +9068,7 @@ void OSD::dequeue_peering_evt(
       p->second == pg) {
     dout(20) << __func__ << " removed pg " << pg << " from pg_map" << dendl;
     pg_map.erase(p);
+    pg_map_size = pg_map.size();
     pg->put("PGMap");
   } else {
     dout(20) << __func__ << " failed to remove pg " << pg << " from pg_map" << dendl;
diff --git a/src/osd/OSD.h b/src/osd/OSD.h
index a9b7ecc5548f3..258656837e8af 100644
--- a/src/osd/OSD.h
+++ b/src/osd/OSD.h
@@ -1851,6 +1851,7 @@ protected:
   // -- placement groups --
   RWLock pg_map_lock; // this lock orders *above* individual PG _locks
   ceph::unordered_map<spg_t, PGRef> pg_map; // protected by pg_map lock
+  std::atomic<size_t> pg_map_size = {0};
 
   std::mutex pending_creates_lock;
   using create_from_osd_t = std::pair<utime_t, bool>;
-- 
2.39.5