git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
crimson/osd: rename CoreState to OSDSingletonState
author    Samuel Just <sjust@redhat.com>
          Tue, 26 Jul 2022 02:21:30 +0000 (02:21 +0000)
committer Samuel Just <sjust@redhat.com>
          Wed, 21 Sep 2022 17:36:50 +0000 (10:36 -0700)
CoreState was a bad name -- "core" is going to be used
to refer to seastar reactors/cores as well.

Signed-off-by: Samuel Just <sjust@redhat.com>
src/crimson/osd/pg_shard_manager.cc
src/crimson/osd/pg_shard_manager.h
src/crimson/osd/shard_services.cc
src/crimson/osd/shard_services.h

index 5b319f3e1598d07f4669197205d4fac1e6aa55cd..bfe4a762f844ed341934526dd39eab43d759c009 100644 (file)
@@ -12,10 +12,10 @@ PGShardManager::PGShardManager(
   crimson::mon::Client &monc,
   crimson::mgr::Client &mgrc,
   crimson::os::FuturizedStore &store)
-  : core_state(whoami, cluster_msgr, public_msgr,
-              monc, mgrc, store),
+  : osd_singleton_state(whoami, cluster_msgr, public_msgr,
+                       monc, mgrc, store),
     local_state(whoami),
-    shard_services(core_state, local_state)
+    shard_services(osd_singleton_state, local_state)
 {}
 
 }
index e473708df2133bd222fe95c4bcd0c57f2f438ecf..eccbce358bbfb51b894d032d8a985f7922dc7d6d 100644 (file)
@@ -21,7 +21,7 @@ namespace crimson::osd {
  * etc)
  */
 class PGShardManager {
-  CoreState core_state;
+  OSDSingletonState osd_singleton_state;
   PerShardState local_state;
   ShardServices shard_services;
 
@@ -37,7 +37,7 @@ public:
   auto &get_shard_services() { return shard_services; }
 
   void update_map(OSDMapService::cached_map_t map) {
-    core_state.update_map(map);
+    osd_singleton_state.update_map(map);
     local_state.update_map(map);
   }
 
@@ -45,56 +45,56 @@ public:
     return local_state.stop_registry();
   }
 
-  FORWARD_TO_CORE(send_pg_created)
+  FORWARD_TO_OSD_SINGLETON(send_pg_created)
 
   // osd state forwards
-  FORWARD(is_active, is_active, core_state.osd_state)
-  FORWARD(is_preboot, is_preboot, core_state.osd_state)
-  FORWARD(is_booting, is_booting, core_state.osd_state)
-  FORWARD(is_stopping, is_stopping, core_state.osd_state)
-  FORWARD(is_prestop, is_prestop, core_state.osd_state)
-  FORWARD(is_initializing, is_initializing, core_state.osd_state)
-  FORWARD(set_prestop, set_prestop, core_state.osd_state)
-  FORWARD(set_preboot, set_preboot, core_state.osd_state)
-  FORWARD(set_booting, set_booting, core_state.osd_state)
-  FORWARD(set_stopping, set_stopping, core_state.osd_state)
-  FORWARD(set_active, set_active, core_state.osd_state)
-  FORWARD(when_active, when_active, core_state.osd_state)
-  FORWARD_CONST(get_osd_state_string, to_string, core_state.osd_state)
-
-  FORWARD(got_map, got_map, core_state.osdmap_gate)
-  FORWARD(wait_for_map, wait_for_map, core_state.osdmap_gate)
+  FORWARD(is_active, is_active, osd_singleton_state.osd_state)
+  FORWARD(is_preboot, is_preboot, osd_singleton_state.osd_state)
+  FORWARD(is_booting, is_booting, osd_singleton_state.osd_state)
+  FORWARD(is_stopping, is_stopping, osd_singleton_state.osd_state)
+  FORWARD(is_prestop, is_prestop, osd_singleton_state.osd_state)
+  FORWARD(is_initializing, is_initializing, osd_singleton_state.osd_state)
+  FORWARD(set_prestop, set_prestop, osd_singleton_state.osd_state)
+  FORWARD(set_preboot, set_preboot, osd_singleton_state.osd_state)
+  FORWARD(set_booting, set_booting, osd_singleton_state.osd_state)
+  FORWARD(set_stopping, set_stopping, osd_singleton_state.osd_state)
+  FORWARD(set_active, set_active, osd_singleton_state.osd_state)
+  FORWARD(when_active, when_active, osd_singleton_state.osd_state)
+  FORWARD_CONST(get_osd_state_string, to_string, osd_singleton_state.osd_state)
+
+  FORWARD(got_map, got_map, osd_singleton_state.osdmap_gate)
+  FORWARD(wait_for_map, wait_for_map, osd_singleton_state.osdmap_gate)
 
   // Metacoll
-  FORWARD_TO_CORE(init_meta_coll)
-  FORWARD_TO_CORE(get_meta_coll)
+  FORWARD_TO_OSD_SINGLETON(init_meta_coll)
+  FORWARD_TO_OSD_SINGLETON(get_meta_coll)
 
   // Core OSDMap methods
-  FORWARD_TO_CORE(get_map)
-  FORWARD_TO_CORE(load_map_bl)
-  FORWARD_TO_CORE(load_map_bls)
-  FORWARD_TO_CORE(store_maps)
-  FORWARD_TO_CORE(get_up_epoch)
-  FORWARD_TO_CORE(set_up_epoch)
-
-  FORWARD(pg_created, pg_created, core_state.pg_map)
+  FORWARD_TO_OSD_SINGLETON(get_map)
+  FORWARD_TO_OSD_SINGLETON(load_map_bl)
+  FORWARD_TO_OSD_SINGLETON(load_map_bls)
+  FORWARD_TO_OSD_SINGLETON(store_maps)
+  FORWARD_TO_OSD_SINGLETON(get_up_epoch)
+  FORWARD_TO_OSD_SINGLETON(set_up_epoch)
+
+  FORWARD(pg_created, pg_created, osd_singleton_state.pg_map)
   auto load_pgs() {
-    return core_state.load_pgs(shard_services);
+    return osd_singleton_state.load_pgs(shard_services);
   }
-  FORWARD_TO_CORE(stop_pgs)
-  FORWARD_CONST(get_pg_stats, get_pg_stats, core_state)
+  FORWARD_TO_OSD_SINGLETON(stop_pgs)
+  FORWARD_CONST(get_pg_stats, get_pg_stats, osd_singleton_state)
 
-  FORWARD_CONST(for_each_pg, for_each_pg, core_state)
-  auto get_num_pgs() const { return core_state.pg_map.get_pgs().size(); }
+  FORWARD_CONST(for_each_pg, for_each_pg, osd_singleton_state)
+  auto get_num_pgs() const { return osd_singleton_state.pg_map.get_pgs().size(); }
 
   auto broadcast_map_to_pgs(epoch_t epoch) {
-    return core_state.broadcast_map_to_pgs(
+    return osd_singleton_state.broadcast_map_to_pgs(
       *this, shard_services, epoch);
   }
 
   template <typename F>
   auto with_pg(spg_t pgid, F &&f) {
-    return std::invoke(std::forward<F>(f), core_state.get_pg(pgid));
+    return std::invoke(std::forward<F>(f), osd_singleton_state.get_pg(pgid));
   }
 
   template <typename T, typename... Args>
@@ -109,7 +109,7 @@ public:
       opref.get_connection_pipeline().await_active
     ).then([this, &opref, &logger] {
       logger.debug("{}: start_pg_operation in await_active stage", opref);
-      return core_state.osd_state.when_active();
+      return osd_singleton_state.osd_state.when_active();
     }).then([&logger, &opref] {
       logger.debug("{}: start_pg_operation active, entering await_map", opref);
       return opref.template enter_stage<>(
@@ -121,7 +121,7 @@ public:
       return opref.template with_blocking_event<OSDMapBlockingEvent>(
        [this, &opref](auto &&trigger) {
          std::ignore = this;
-         return core_state.osdmap_gate.wait_for_map(
+         return osd_singleton_state.osdmap_gate.wait_for_map(
            std::move(trigger),
            opref.get_epoch(),
            &shard_services);
@@ -138,7 +138,7 @@ public:
          PGMap::PGCreationBlockingEvent
          >([this, &opref](auto &&trigger) {
            std::ignore = this; // avoid clang warning
-           return core_state.get_or_create_pg(
+           return osd_singleton_state.get_or_create_pg(
              *this,
              shard_services,
              std::move(trigger),
@@ -151,7 +151,8 @@ public:
          PGMap::PGCreationBlockingEvent
          >([this, &opref](auto &&trigger) {
            std::ignore = this; // avoid clang warning
-           return core_state.wait_for_pg(std::move(trigger), opref.get_pgid());
+           return osd_singleton_state.wait_for_pg(
+             std::move(trigger), opref.get_pgid());
          });
       }
     }).then([this, &logger, &opref](Ref<PG> pgref) {
index e034c8c511bcc2e4795b3cc433957d47a604d667..654c37559354b2547804d204252ed751730a580e 100644 (file)
@@ -46,7 +46,7 @@ PerShardState::PerShardState(
   cct.get_perfcounters_collection()->add(recoverystate_perf);
 }
 
-CoreState::CoreState(
+OSDSingletonState::OSDSingletonState(
   int whoami,
   crimson::net::Messenger &cluster_msgr,
   crimson::net::Messenger &public_msgr,
@@ -54,7 +54,7 @@ CoreState::CoreState(
   crimson::mgr::Client &mgrc,
   crimson::os::FuturizedStore &store)
   : whoami(whoami),
-    osdmap_gate("CoreState::osdmap_gate"),
+    osdmap_gate("OSDSingletonState::osdmap_gate"),
     cluster_msgr(cluster_msgr),
     public_msgr(public_msgr),
     monc(monc),
@@ -75,7 +75,7 @@ CoreState::CoreState(
   osdmaps[0] = boost::make_local_shared<OSDMap>();
 }
 
-seastar::future<> CoreState::send_to_osd(
+seastar::future<> OSDSingletonState::send_to_osd(
   int peer, MessageURef m, epoch_t from_epoch)
 {
   if (osdmap->is_down(peer)) {
@@ -92,7 +92,7 @@ seastar::future<> CoreState::send_to_osd(
   }
 }
 
-seastar::future<> CoreState::osdmap_subscribe(
+seastar::future<> OSDSingletonState::osdmap_subscribe(
   version_t epoch, bool force_request)
 {
   logger().info("{}({})", __func__, epoch);
@@ -104,7 +104,7 @@ seastar::future<> CoreState::osdmap_subscribe(
   }
 }
 
-void CoreState::queue_want_pg_temp(
+void OSDSingletonState::queue_want_pg_temp(
   pg_t pgid,
   const vector<int>& want,
   bool forced)
@@ -117,13 +117,13 @@ void CoreState::queue_want_pg_temp(
   }
 }
 
-void CoreState::remove_want_pg_temp(pg_t pgid)
+void OSDSingletonState::remove_want_pg_temp(pg_t pgid)
 {
   pg_temp_wanted.erase(pgid);
   pg_temp_pending.erase(pgid);
 }
 
-void CoreState::requeue_pg_temp()
+void OSDSingletonState::requeue_pg_temp()
 {
   unsigned old_wanted = pg_temp_wanted.size();
   unsigned old_pending = pg_temp_pending.size();
@@ -137,7 +137,7 @@ void CoreState::requeue_pg_temp()
     pg_temp_wanted.size());
 }
 
-seastar::future<> CoreState::send_pg_temp()
+seastar::future<> OSDSingletonState::send_pg_temp()
 {
   if (pg_temp_wanted.empty())
     return seastar::now();
@@ -165,7 +165,7 @@ seastar::future<> CoreState::send_pg_temp()
 
 std::ostream& operator<<(
   std::ostream& out,
-  const CoreState::pg_temp_t& pg_temp)
+  const OSDSingletonState::pg_temp_t& pg_temp)
 {
   out << pg_temp.acting;
   if (pg_temp.forced) {
@@ -174,7 +174,7 @@ std::ostream& operator<<(
   return out;
 }
 
-seastar::future<> CoreState::send_pg_created(pg_t pgid)
+seastar::future<> OSDSingletonState::send_pg_created(pg_t pgid)
 {
   logger().debug(__func__);
   auto o = get_osdmap();
@@ -183,7 +183,7 @@ seastar::future<> CoreState::send_pg_created(pg_t pgid)
   return monc.send_message(crimson::make_message<MOSDPGCreated>(pgid));
 }
 
-seastar::future<> CoreState::send_pg_created()
+seastar::future<> OSDSingletonState::send_pg_created()
 {
   logger().debug(__func__);
   auto o = get_osdmap();
@@ -194,7 +194,7 @@ seastar::future<> CoreState::send_pg_created()
     });
 }
 
-void CoreState::prune_pg_created()
+void OSDSingletonState::prune_pg_created()
 {
   logger().debug(__func__);
   auto o = get_osdmap();
@@ -211,7 +211,7 @@ void CoreState::prune_pg_created()
   }
 }
 
-HeartbeatStampsRef CoreState::get_hb_stamps(int peer)
+HeartbeatStampsRef OSDSingletonState::get_hb_stamps(int peer)
 {
   auto [stamps, added] = heartbeat_stamps.try_emplace(peer);
   if (added) {
@@ -220,7 +220,7 @@ HeartbeatStampsRef CoreState::get_hb_stamps(int peer)
   return stamps->second;
 }
 
-seastar::future<> CoreState::send_alive(const epoch_t want)
+seastar::future<> OSDSingletonState::send_alive(const epoch_t want)
 {
   logger().info(
     "{} want={} up_thru_wanted={}",
@@ -249,7 +249,7 @@ seastar::future<> CoreState::send_alive(const epoch_t want)
   }
 }
 
-const char** CoreState::get_tracked_conf_keys() const
+const char** OSDSingletonState::get_tracked_conf_keys() const
 {
   static const char* KEYS[] = {
     "osd_max_backfills",
@@ -259,7 +259,7 @@ const char** CoreState::get_tracked_conf_keys() const
   return KEYS;
 }
 
-void CoreState::handle_conf_change(
+void OSDSingletonState::handle_conf_change(
   const ConfigProxy& conf,
   const std::set <std::string> &changed)
 {
@@ -273,12 +273,12 @@ void CoreState::handle_conf_change(
   }
 }
 
-CoreState::cached_map_t CoreState::get_map() const
+OSDSingletonState::cached_map_t OSDSingletonState::get_map() const
 {
   return osdmap;
 }
 
-seastar::future<CoreState::cached_map_t> CoreState::get_map(epoch_t e)
+seastar::future<OSDSingletonState::cached_map_t> OSDSingletonState::get_map(epoch_t e)
 {
   // TODO: use LRU cache for managing osdmap, fallback to disk if we have to
   if (auto found = osdmaps.find(e); found) {
@@ -291,7 +291,7 @@ seastar::future<CoreState::cached_map_t> CoreState::get_map(epoch_t e)
   }
 }
 
-void CoreState::store_map_bl(
+void OSDSingletonState::store_map_bl(
   ceph::os::Transaction& t,
   epoch_t e, bufferlist&& bl)
 {
@@ -299,7 +299,7 @@ void CoreState::store_map_bl(
   map_bl_cache.insert(e, std::move(bl));
 }
 
-seastar::future<bufferlist> CoreState::load_map_bl(
+seastar::future<bufferlist> OSDSingletonState::load_map_bl(
   epoch_t e)
 {
   if (std::optional<bufferlist> found = map_bl_cache.find(e); found) {
@@ -309,7 +309,7 @@ seastar::future<bufferlist> CoreState::load_map_bl(
   }
 }
 
-seastar::future<std::map<epoch_t, bufferlist>> CoreState::load_map_bls(
+seastar::future<std::map<epoch_t, bufferlist>> OSDSingletonState::load_map_bls(
   epoch_t first,
   epoch_t last)
 {
@@ -328,7 +328,7 @@ seastar::future<std::map<epoch_t, bufferlist>> CoreState::load_map_bls(
   });
 }
 
-seastar::future<std::unique_ptr<OSDMap>> CoreState::load_map(epoch_t e)
+seastar::future<std::unique_ptr<OSDMap>> OSDSingletonState::load_map(epoch_t e)
 {
   auto o = std::make_unique<OSDMap>();
   if (e > 0) {
@@ -341,7 +341,7 @@ seastar::future<std::unique_ptr<OSDMap>> CoreState::load_map(epoch_t e)
   }
 }
 
-seastar::future<> CoreState::store_maps(ceph::os::Transaction& t,
+seastar::future<> OSDSingletonState::store_maps(ceph::os::Transaction& t,
                                   epoch_t start, Ref<MOSDMap> m)
 {
   return seastar::do_for_each(
@@ -375,7 +375,7 @@ seastar::future<> CoreState::store_maps(ceph::os::Transaction& t,
     });
 }
 
-seastar::future<Ref<PG>> CoreState::make_pg(
+seastar::future<Ref<PG>> OSDSingletonState::make_pg(
   ShardServices &shard_services,
   OSDMapService::cached_map_t create_map,
   spg_t pgid,
@@ -429,7 +429,7 @@ seastar::future<Ref<PG>> CoreState::make_pg(
   });
 }
 
-seastar::future<Ref<PG>> CoreState::handle_pg_create_info(
+seastar::future<Ref<PG>> OSDSingletonState::handle_pg_create_info(
   PGShardManager &shard_manager,
   ShardServices &shard_services,
   std::unique_ptr<PGCreateInfo> info) {
@@ -520,7 +520,7 @@ seastar::future<Ref<PG>> CoreState::handle_pg_create_info(
 
 
 seastar::future<Ref<PG>>
-CoreState::get_or_create_pg(
+OSDSingletonState::get_or_create_pg(
   PGShardManager &shard_manager,
   ShardServices &shard_services,
   PGMap::PGCreationBlockingEvent::TriggerI&& trigger,
@@ -541,18 +541,18 @@ CoreState::get_or_create_pg(
   }
 }
 
-seastar::future<Ref<PG>> CoreState::wait_for_pg(
+seastar::future<Ref<PG>> OSDSingletonState::wait_for_pg(
   PGMap::PGCreationBlockingEvent::TriggerI&& trigger, spg_t pgid)
 {
   return pg_map.wait_for_pg(std::move(trigger), pgid).first;
 }
 
-Ref<PG> CoreState::get_pg(spg_t pgid)
+Ref<PG> OSDSingletonState::get_pg(spg_t pgid)
 {
   return pg_map.get_pg(pgid);
 }
 
-seastar::future<> CoreState::load_pgs(
+seastar::future<> OSDSingletonState::load_pgs(
   ShardServices &shard_services)
 {
   return store.list_collections(
@@ -585,7 +585,7 @@ seastar::future<> CoreState::load_pgs(
   });
 }
 
-seastar::future<Ref<PG>> CoreState::load_pg(
+seastar::future<Ref<PG>> OSDSingletonState::load_pg(
   ShardServices &shard_services,
   spg_t pgid)
 {
@@ -608,7 +608,7 @@ seastar::future<Ref<PG>> CoreState::load_pg(
   });
 }
 
-seastar::future<> CoreState::stop_pgs()
+seastar::future<> OSDSingletonState::stop_pgs()
 {
   return seastar::parallel_for_each(
     pg_map.get_pgs(),
@@ -617,7 +617,7 @@ seastar::future<> CoreState::stop_pgs()
     });
 }
 
-std::map<pg_t, pg_stat_t> CoreState::get_pg_stats() const
+std::map<pg_t, pg_stat_t> OSDSingletonState::get_pg_stats() const
 {
   std::map<pg_t, pg_stat_t> ret;
   for (auto [pgid, pg] : pg_map.get_pgs()) {
@@ -631,7 +631,7 @@ std::map<pg_t, pg_stat_t> CoreState::get_pg_stats() const
   return ret;
 }
 
-seastar::future<> CoreState::broadcast_map_to_pgs(
+seastar::future<> OSDSingletonState::broadcast_map_to_pgs(
   PGShardManager &shard_manager,
   ShardServices &shard_services,
   epoch_t epoch)
index 1860c94dbf5d4f438e918ee51081ea34c7020679..9698a89eed3836a21608c51945695c24eeeb3d87 100644 (file)
@@ -105,15 +105,15 @@ class PerShardState {
 };
 
 /**
- * CoreState
+ * OSDSingletonState
  *
  * OSD-wide singleton holding instances that need to be accessible
  * from all PGs.
  */
-class CoreState : public md_config_obs_t, public OSDMapService {
+class OSDSingletonState : public md_config_obs_t, public OSDMapService {
   friend class ShardServices;
   friend class PGShardManager;
-  CoreState(
+  OSDSingletonState(
     int whoami,
     crimson::net::Messenger &cluster_msgr,
     crimson::net::Messenger &public_msgr,
@@ -295,7 +295,8 @@ class CoreState : public md_config_obs_t, public OSDMapService {
   }
 
 #define FORWARD_TO_LOCAL(METHOD) FORWARD(METHOD, METHOD, local_state)
-#define FORWARD_TO_CORE(METHOD) FORWARD(METHOD, METHOD, core_state)
+#define FORWARD_TO_OSD_SINGLETON(METHOD) \
+  FORWARD(METHOD, METHOD, osd_singleton_state)
 
 /**
  * Represents services available to each PG
@@ -303,18 +304,18 @@ class CoreState : public md_config_obs_t, public OSDMapService {
 class ShardServices {
   using cached_map_t = boost::local_shared_ptr<const OSDMap>;
 
-  CoreState &core_state;
+  OSDSingletonState &osd_singleton_state;
   PerShardState &local_state;
 public:
   ShardServices(
-    CoreState &core_state,
+    OSDSingletonState &osd_singleton_state,
     PerShardState &local_state)
-    : core_state(core_state), local_state(local_state) {}
+    : osd_singleton_state(osd_singleton_state), local_state(local_state) {}
 
-  FORWARD_TO_CORE(send_to_osd)
+  FORWARD_TO_OSD_SINGLETON(send_to_osd)
 
   crimson::os::FuturizedStore &get_store() {
-    return core_state.store;
+    return osd_singleton_state.store;
   }
 
   crimson::common::CephContext *get_cct() {
@@ -323,7 +324,7 @@ public:
 
   // OSDMapService
   const OSDMapService &get_osdmap_service() const {
-    return core_state;
+    return osd_singleton_state;
   }
 
   template <typename T, typename... Args>
@@ -361,21 +362,21 @@ public:
   }
 
   FORWARD_TO_LOCAL(get_osdmap)
-  FORWARD_TO_CORE(get_pg_num)
+  FORWARD_TO_OSD_SINGLETON(get_pg_num)
   FORWARD(with_throttle_while, with_throttle_while, local_state.throttler)
 
-  FORWARD_TO_CORE(osdmap_subscribe)
-  FORWARD_TO_CORE(get_tid)
-  FORWARD_TO_CORE(queue_want_pg_temp)
-  FORWARD_TO_CORE(remove_want_pg_temp)
-  FORWARD_TO_CORE(requeue_pg_temp)
-  FORWARD_TO_CORE(send_pg_created)
-  FORWARD_TO_CORE(inc_pg_num)
-  FORWARD_TO_CORE(dec_pg_num)
-  FORWARD_TO_CORE(send_alive)
-  FORWARD_TO_CORE(send_pg_temp)
-  FORWARD_CONST(get_mnow, get_mnow, core_state)
-  FORWARD_TO_CORE(get_hb_stamps)
+  FORWARD_TO_OSD_SINGLETON(osdmap_subscribe)
+  FORWARD_TO_OSD_SINGLETON(get_tid)
+  FORWARD_TO_OSD_SINGLETON(queue_want_pg_temp)
+  FORWARD_TO_OSD_SINGLETON(remove_want_pg_temp)
+  FORWARD_TO_OSD_SINGLETON(requeue_pg_temp)
+  FORWARD_TO_OSD_SINGLETON(send_pg_created)
+  FORWARD_TO_OSD_SINGLETON(inc_pg_num)
+  FORWARD_TO_OSD_SINGLETON(dec_pg_num)
+  FORWARD_TO_OSD_SINGLETON(send_alive)
+  FORWARD_TO_OSD_SINGLETON(send_pg_temp)
+  FORWARD_CONST(get_mnow, get_mnow, osd_singleton_state)
+  FORWARD_TO_OSD_SINGLETON(get_hb_stamps)
 
   FORWARD(
     maybe_get_cached_obc, maybe_get_cached_obc, local_state.obc_registry)
@@ -383,15 +384,20 @@ public:
     get_cached_obc, get_cached_obc, local_state.obc_registry)
 
   FORWARD(
-    local_request_reservation, request_reservation, core_state.local_reserver)
+    local_request_reservation, request_reservation,
+    osd_singleton_state.local_reserver)
   FORWARD(
-    local_update_priority, update_priority, core_state.local_reserver)
+    local_update_priority, update_priority,
+    osd_singleton_state.local_reserver)
   FORWARD(
-    local_cancel_reservation, cancel_reservation, core_state.local_reserver)
+    local_cancel_reservation, cancel_reservation,
+    osd_singleton_state.local_reserver)
   FORWARD(
-    remote_request_reservation, request_reservation, core_state.remote_reserver)
+    remote_request_reservation, request_reservation,
+    osd_singleton_state.remote_reserver)
   FORWARD(
-    remote_cancel_reservation, cancel_reservation, core_state.remote_reserver)
+    remote_cancel_reservation, cancel_reservation,
+    osd_singleton_state.remote_reserver)
 };
 
 }