From: Samuel Just <sjust@redhat.com>
Date: Thu, 8 Sep 2022 17:38:22 +0000 (-0700)
Subject: crimson/osd/shard_services: check core on each PerShardState method
X-Git-Tag: v18.1.0~1115^2~9
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=ea46c3e6c36e166085cca03965221849cc91dd83;p=ceph.git

crimson/osd/shard_services: check core on each PerShardState method

Signed-off-by: Samuel Just <sjust@redhat.com>
---

diff --git a/src/crimson/osd/shard_services.cc b/src/crimson/osd/shard_services.cc
index bc5a02c3d3cc..979b3314b492 100644
--- a/src/crimson/osd/shard_services.cc
+++ b/src/crimson/osd/shard_services.cc
@@ -52,6 +52,7 @@ PerShardState::PerShardState(
 
 seastar::future<> PerShardState::stop_pgs()
 {
+  assert_core();
   return seastar::parallel_for_each(
     pg_map.get_pgs(),
     [](auto& p) {
@@ -61,6 +62,7 @@ seastar::future<> PerShardState::stop_pgs()
 
 std::map<pg_t, pg_stat_t> PerShardState::get_pg_stats() const
 {
+  assert_core();
   std::map<pg_t, pg_stat_t> ret;
   for (auto [pgid, pg] : pg_map.get_pgs()) {
     if (pg->is_primary()) {
@@ -77,6 +79,7 @@ seastar::future<> PerShardState::broadcast_map_to_pgs(
   ShardServices &shard_services,
   epoch_t epoch)
 {
+  assert_core();
   auto &pgs = pg_map.get_pgs();
   return seastar::parallel_for_each(
     pgs.begin(), pgs.end(),
@@ -90,11 +93,13 @@ seastar::future<> PerShardState::broadcast_map_to_pgs(
 
 Ref<PG> PerShardState::get_pg(spg_t pgid)
 {
+  assert_core();
   return pg_map.get_pg(pgid);
 }
 
 HeartbeatStampsRef PerShardState::get_hb_stamps(int peer)
 {
+  assert_core();
   auto [stamps, added] = heartbeat_stamps.try_emplace(peer);
   if (added) {
     stamps->second = ceph::make_ref<HeartbeatStamps>(peer);
diff --git a/src/crimson/osd/shard_services.h b/src/crimson/osd/shard_services.h
index 63e251a1366f..e1adbb9049d9 100644
--- a/src/crimson/osd/shard_services.h
+++ b/src/crimson/osd/shard_services.h
@@ -61,6 +61,9 @@ class PerShardState {
   using cached_map_t = OSDMapService::cached_map_t;
   using local_cached_map_t = OSDMapService::local_cached_map_t;
 
+  const core_id_t core = seastar::this_shard_id();
+#define assert_core() ceph_assert(seastar::this_shard_id() == core);
+
   const int whoami;
   crimson::os::FuturizedStore &store;
   crimson::common::CephContext cct;
@@ -74,11 +77,18 @@ class PerShardState {
   epoch_t up_epoch = 0;
   OSDMapService::cached_map_t osdmap;
 
-  const auto &get_osdmap() const { return osdmap; }
+  const auto &get_osdmap() const {
+    assert_core();
+    return osdmap;
+  }
   void update_map(OSDMapService::cached_map_t new_osdmap) {
+    assert_core();
     osdmap = std::move(new_osdmap);
   }
-  void set_up_epoch(epoch_t epoch) { up_epoch = epoch; }
+  void set_up_epoch(epoch_t epoch) {
+    assert_core();
+    up_epoch = epoch;
+  }
 
   crimson::osd::ObjectContextRegistry obc_registry;
 
@@ -89,6 +99,7 @@ class PerShardState {
   // case the shutdown may never succeed.
   bool stopping = false;
   seastar::future<> stop_registry() {
+    assert_core();
     crimson::get_logger(ceph_subsys_osd).info("PerShardState::{}", __func__);
     stopping = true;
     return registry.stop();
@@ -106,6 +117,7 @@ class PerShardState {
   Ref<PG> get_pg(spg_t pgid);
   template <typename F>
   void for_each_pg(F &&f) const {
+    assert_core();
     for (auto &pg : pg_map.get_pgs()) {
       std::invoke(f, pg.first, pg.second);
     }
@@ -113,10 +125,13 @@ class PerShardState {
 
   template <typename T, typename... Args>
   auto start_operation(Args&&... args) {
+    assert_core();
     if (__builtin_expect(stopping, false)) {
       throw crimson::common::system_shutdown_exception();
     }
     auto op = registry.create_operation<T>(std::forward<Args>(args)...);
+    crimson::get_logger(ceph_subsys_osd).info(
+      "PerShardState::{}, {}", __func__, *op);
     auto fut = op->start().then([op /* by copy */] {
       // ensure the op's lifetime is appropriate. It is not enough to
       // guarantee it's alive at the scheduling stages (i.e. `then()`
@@ -129,6 +144,7 @@ class PerShardState {
   // tids for ops i issue, prefixed with core id to ensure uniqueness
   ceph_tid_t next_tid;
   ceph_tid_t get_tid() {
+    assert_core();
     return next_tid++;
   }
 
@@ -138,6 +154,7 @@ class PerShardState {
   // Time state
   const ceph::mono_time startup_time;
   ceph::signedspan get_mnow() const {
+    assert_core();
     return ceph::mono_clock::now() - startup_time;
   }
 