crimson/osd: settle snap_reserver within ShardServices
author    Radoslaw Zarzynski <rzarzyns@redhat.com>
          Wed, 26 Oct 2022 17:45:50 +0000 (17:45 +0000)
committer Radoslaw Zarzynski <rzarzyns@redhat.com>
          Tue, 28 Feb 2023 16:22:04 +0000 (16:22 +0000)
The snap_reserver added here will be used by the upcoming `SnapTrimRequest`.

Signed-off-by: Radoslaw Zarzynski <rzarzyns@redhat.com>
src/crimson/osd/shard_services.cc
src/crimson/osd/shard_services.h

index c34ae8cb2bc5be31cdb37f06b5cd88d5a6d95ede..c8668ea8fe9e80989837dd2ddb53cc6f5c445ea5 100644 (file)
@@ -135,7 +135,11 @@ OSDSingletonState::OSDSingletonState(
       &cct,
       &finisher,
       crimson::common::local_conf()->osd_max_backfills,
-      crimson::common::local_conf()->osd_min_recovery_priority)
+      crimson::common::local_conf()->osd_min_recovery_priority),
+    snap_reserver(
+      &cct,
+      &finisher,
+      crimson::common::local_conf()->osd_max_trimming_pgs)
 {
   crimson::common::local_conf().add_observer(this);
   osdmaps[0] = boost::make_local_shared<OSDMap>();
@@ -317,6 +321,7 @@ const char** OSDSingletonState::get_tracked_conf_keys() const
   static const char* KEYS[] = {
     "osd_max_backfills",
     "osd_min_recovery_priority",
+    "osd_max_trimming_pgs",
     nullptr
   };
   return KEYS;
@@ -334,6 +339,9 @@ void OSDSingletonState::handle_conf_change(
     local_reserver.set_min_priority(conf->osd_min_recovery_priority);
     remote_reserver.set_min_priority(conf->osd_min_recovery_priority);
   }
+  if (changed.count("osd_max_trimming_pgs")) {
+    snap_reserver.set_max(conf->osd_max_trimming_pgs);
+  }
 }
 
 seastar::future<OSDSingletonState::local_cached_map_t>
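
For reference, snap_reserver uses the same AsyncReserver<spg_t, DirectFinisher> machinery as the existing local_reserver and remote_reserver. Below is a paraphrased, non-authoritative sketch of the slice of that interface this diff relies on (recalled from src/common/AsyncReserver.h; exact signatures may differ slightly):

// Paraphrased sketch of the AsyncReserver<T, F> surface touched by this
// commit; the real template lives in src/common/AsyncReserver.h.
template <typename T, typename F>
class AsyncReserver {
public:
  // cct: config/logging context; f: finisher that runs granted callbacks;
  // max_allowed: concurrent grants -- osd_max_trimming_pgs for snap_reserver.
  AsyncReserver(CephContext *cct, F *f,
                unsigned max_allowed, unsigned min_priority = 0);

  // Queue a reservation for `item`: `on_reserved` runs once a slot is
  // granted, `on_preempt` if a higher-priority request later evicts it.
  void request_reservation(T item, Context *on_reserved,
                           unsigned prio, Context *on_preempt = nullptr);

  // Release a queued or granted reservation, freeing the slot.
  void cancel_reservation(T item);

  // Runtime limit adjustment, driven here by handle_conf_change().
  void set_max(unsigned max);

  // Emit current reservations; backs the *_dump_reservations forwarders.
  void dump(ceph::Formatter *f);
};
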
index af25a110ea7e2d828a89fcb6e135fd10cbe8adff..86397971f14aba0a1415c636fa854dd9a8ba14be 100644 (file)
@@ -269,6 +269,7 @@ private:
   } finisher;
   AsyncReserver<spg_t, DirectFinisher> local_reserver;
   AsyncReserver<spg_t, DirectFinisher> remote_reserver;
+  AsyncReserver<spg_t, DirectFinisher> snap_reserver;
 
   epoch_t up_thru_wanted = 0;
   seastar::future<> send_alive(epoch_t want);
@@ -478,6 +479,12 @@ public:
   FORWARD_TO_OSD_SINGLETON_TARGET(
     remote_dump_reservations,
     remote_reserver.dump)
+  FORWARD_TO_OSD_SINGLETON_TARGET(
+    snap_cancel_reservation,
+    snap_reserver.cancel_reservation)
+  FORWARD_TO_OSD_SINGLETON_TARGET(
+    snap_dump_reservations,
+    snap_reserver.dump)
 
   Context *invoke_context_on_core(core_id_t core, Context *c) {
     if (!c) return nullptr;
@@ -523,6 +530,23 @@ public:
       invoke_context_on_core(seastar::this_shard_id(), on_reserved),
       invoke_context_on_core(seastar::this_shard_id(), on_preempt));
   }
+  seastar::future<> snap_request_reservation(
+    spg_t item,
+    Context *on_reserved,
+    unsigned prio,
+    Context *on_preempt) {
+    return with_singleton(
+      [item, prio](OSDSingletonState &singleton,
+                  Context *wrapped_on_reserved, Context *wrapped_on_preempt) {
+       return singleton.snap_reserver.request_reservation(
+         item,
+         wrapped_on_reserved,
+         prio,
+         wrapped_on_preempt);
+      },
+      invoke_context_on_core(seastar::this_shard_id(), on_reserved),
+      invoke_context_on_core(seastar::this_shard_id(), on_preempt));
+  }
 
 #undef FORWARD_CONST
 #undef FORWARD
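
The `SnapTrimRequest` mentioned in the commit message does not exist yet, so the following is only an illustrative sketch, under the assumption that a future snap-trim operation would reserve a slot through ShardServices the same way backfill does through local_request_reservation. The free function, its parameters, and the use of LambdaContext are assumptions made for the example, not part of this commit:

// Illustrative sketch only: how a snap-trim operation might use the new
// ShardServices forwarders added by this commit. Assumes the usual crimson
// headers (crimson/osd/shard_services.h, common/Context.h) are available.
seastar::future<> reserve_and_trim(
  crimson::osd::ShardServices &shard_services,
  spg_t pgid,
  unsigned prio)
{
  // on_reserved fires on this shard (courtesy of invoke_context_on_core)
  // once snap_reserver grants one of the osd_max_trimming_pgs slots.
  Context *on_reserved = new LambdaContext([pgid](int) {
    // start trimming snapshots for pgid here
  });
  // No preemption handling in this sketch, so on_preempt stays null.
  return shard_services.snap_request_reservation(
    pgid, on_reserved, prio, /*on_preempt=*/nullptr);
}

// Once trimming completes (or the PG is stopped), the slot would be
// released with the matching forwarder:
//   shard_services.snap_cancel_reservation(pgid);
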