git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
crimson/osd/osd_operations/peering_event: remove shard_services from constructor
author     Samuel Just <sjust@redhat.com>
           Fri, 24 Jun 2022 22:02:06 +0000 (15:02 -0700)
committer  Samuel Just <sjust@redhat.com>
           Wed, 13 Jul 2022 00:50:02 +0000 (00:50 +0000)
We'll want to supply shard_services as part of with_pg_operation etc., so there is no need to store it at construction time.

Signed-off-by: Samuel Just <sjust@redhat.com>
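
To make the shape of the change easier to see outside the diff, here is a minimal standalone sketch of the pattern, using stub types (ShardServices here is a bare stand-in, not the real crimson::osd::ShardServices): the reference moves from a constructor-stored member to an explicit parameter on the methods that actually use it, so the caller can choose which shard's services to supply at dispatch time.

#include <iostream>

struct ShardServices {               // stub, not the real crimson type
  void dispatch_context() { std::cout << "dispatching ctx\n"; }
};

// Before: the event pins a ShardServices reference at construction time.
class PeeringEventBefore {
  ShardServices &shard_services;
public:
  explicit PeeringEventBefore(ShardServices &ss) : shard_services(ss) {}
  void complete_rctx() { shard_services.dispatch_context(); }
};

// After: the caller (e.g. with_pg / with_pg_operation) supplies it per call.
class PeeringEventAfter {
public:
  PeeringEventAfter() = default;     // constructible without a ShardServices
  void complete_rctx(ShardServices &shard_services) {
    shard_services.dispatch_context();
  }
};

int main() {
  ShardServices ss;
  PeeringEventBefore{ss}.complete_rctx();
  PeeringEventAfter{}.complete_rctx(ss);   // shard chosen at dispatch time
}
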
src/crimson/osd/osd.cc
src/crimson/osd/osd_operations/compound_peering_request.cc
src/crimson/osd/osd_operations/peering_event.cc
src/crimson/osd/osd_operations/peering_event.h
src/crimson/osd/pg.cc
src/crimson/osd/pg.h
src/crimson/osd/pg_recovery.cc
src/crimson/osd/recovery_backend.cc

src/crimson/osd/osd.cc
index e9b792ed7fed629789853155ee8eb85dbf3144e3..979fd138b7bc25c4778901032d78edbbd64063de 100644
@@ -1309,7 +1309,6 @@ seastar::future<> OSD::handle_scrub(crimson::net::ConnectionRef conn,
     PeeringState::RequestScrub scrub_request{m->deep, m->repair};
     return start_pg_operation<RemotePeeringEvent>(
       conn,
-      shard_services,
       from_shard,
       pgid,
       PGPeeringEvent{m->epoch, m->epoch, scrub_request}).second;
@@ -1419,7 +1418,6 @@ seastar::future<> OSD::handle_peering_op(
   std::unique_ptr<PGPeeringEvent> evt(m->get_event());
   (void) start_pg_operation<RemotePeeringEvent>(
     conn,
-    shard_services,
     pg_shard_t{from, m->get_spg().shard},
     m->get_spg(),
     std::move(*evt));
src/crimson/osd/osd_operations/compound_peering_request.cc
index ec6487dfa21bb4af8c596c8d0fc776b2c701d94e..b630fc2b760023ef9db93fa88fdbe514a93ae894 100644
@@ -44,7 +44,9 @@ public:
     RemotePeeringEvent(std::forward<Args>(args)...), state(state) {}
 
   PeeringEvent::interruptible_future<>
-  complete_rctx(Ref<crimson::osd::PG> pg) final {
+  complete_rctx(
+    ShardServices &shard_services,
+    Ref<crimson::osd::PG> pg) final {
     logger().debug("{}: submitting ctx transaction", *this);
     state->ctx.accept_buffered_messages(ctx);
     state = {};
@@ -86,7 +88,6 @@ std::vector<crimson::OperationRef> handle_pg_create(
       auto op = osd.start_pg_operation<PeeringSubEvent>(
          state,
          conn,
-         osd.get_shard_services(),
          pg_shard_t(),
          pgid,
          m->epoch,
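
A side note on why PeeringSubEvent's own constructor needed no edit here: it perfectly forwards its argument pack to RemotePeeringEvent, so removing a base-constructor parameter only shortens the call sites. A sketch of that forwarding pattern, with illustrative stub types rather than the real crimson classes:

#include <string>
#include <utility>

struct Base {
  Base(int /*from*/, std::string /*pgid*/) {}  // previously also took ShardServices&
};

struct SubEvent : Base {
  int state;
  // Forwarding constructor: transparent to changes in Base's parameter list.
  template <typename... Args>
  SubEvent(int state, Args&&... args)
    : Base(std::forward<Args>(args)...), state(state) {}
};

int main() {
  // Call sites shrink by one argument; SubEvent itself is untouched.
  SubEvent e{42, 1, "pg.1"};
  (void)e;
}
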
src/crimson/osd/osd_operations/peering_event.cc
index 1347a2dc68490fb203a8bb9dea16e03fda7912c3..e84439b76727d6c2b54b0a1de596972e0fe9c4e3 100644
@@ -58,9 +58,9 @@ seastar::future<> PeeringEvent<T>::with_pg(
 {
   if (!pg) {
     logger().warn("{}: pg absent, did not create", *this);
-    on_pg_absent();
+    on_pg_absent(shard_services);
     that()->get_handle().exit();
-    return complete_rctx_no_pg();
+    return complete_rctx_no_pg(shard_services);
   }
 
   using interruptor = typename T::interruptor;
@@ -81,10 +81,10 @@ seastar::future<> PeeringEvent<T>::with_pg(
       // recovery.
       return this->template enter_stage<interruptor>(
        BackfillRecovery::bp(*pg).process);
-    }).then_interruptible([this, pg] {
+    }).then_interruptible([this, pg, &shard_services] {
       pg->do_peering_event(evt, ctx);
       that()->get_handle().exit();
-      return complete_rctx(pg);
+      return complete_rctx(shard_services, pg);
     }).then_interruptible([pg, &shard_services]()
                          -> typename T::template interruptible_future<> {
         if (!pg->get_need_up_thru()) {
@@ -100,14 +100,14 @@ seastar::future<> PeeringEvent<T>::with_pg(
 }
 
 template <class T>
-void PeeringEvent<T>::on_pg_absent()
+void PeeringEvent<T>::on_pg_absent(ShardServices &)
 {
   logger().debug("{}: pg absent, dropping", *this);
 }
 
 template <class T>
 typename PeeringEvent<T>::template interruptible_future<>
-PeeringEvent<T>::complete_rctx(Ref<PG> pg)
+PeeringEvent<T>::complete_rctx(ShardServices &shard_services, Ref<PG> pg)
 {
   logger().debug("{}: submitting ctx", *this);
   return shard_services.dispatch_context(
@@ -120,7 +120,7 @@ ConnectionPipeline &RemotePeeringEvent::get_connection_pipeline()
   return get_osd_priv(conn.get()).peering_request_conn_pipeline;
 }
 
-void RemotePeeringEvent::on_pg_absent()
+void RemotePeeringEvent::on_pg_absent(ShardServices &shard_services)
 {
   if (auto& e = get_event().get_event();
       e.dynamic_type() == MQuery::static_type()) {
@@ -143,16 +143,19 @@ void RemotePeeringEvent::on_pg_absent()
   }
 }
 
-RemotePeeringEvent::interruptible_future<> RemotePeeringEvent::complete_rctx(Ref<PG> pg)
+RemotePeeringEvent::interruptible_future<> RemotePeeringEvent::complete_rctx(
+  ShardServices &shard_services,
+  Ref<PG> pg)
 {
   if (pg) {
-    return PeeringEvent::complete_rctx(pg);
+    return PeeringEvent::complete_rctx(shard_services, pg);
   } else {
     return shard_services.dispatch_context_messages(std::move(ctx));
   }
 }
 
-seastar::future<> RemotePeeringEvent::complete_rctx_no_pg()
+seastar::future<> RemotePeeringEvent::complete_rctx_no_pg(
+  ShardServices &shard_services)
 {
   return shard_services.dispatch_context_messages(std::move(ctx));
 }
@@ -168,7 +171,7 @@ seastar::future<> LocalPeeringEvent::start()
       std::chrono::milliseconds(std::lround(delay * 1000)));
   }
   return maybe_delay.then([this] {
-    return with_pg(shard_services, pg);
+    return with_pg(pg->get_shard_services(), pg);
   }).finally([ref=std::move(ref)] {
     logger().debug("{}: complete", *ref);
   });
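
The last hunk above is the one place a stored reference becomes a lookup: LocalPeeringEvent::start() already holds the pg, so it can ask it for the right ShardServices. A reduced sketch of that shape (stub types, not the real crimson code):

struct ShardServices {};

struct PG {
  ShardServices ss;
  ShardServices &get_shard_services() { return ss; }
};

struct LocalPeeringEvent {
  PG *pg;
  void with_pg(ShardServices &, PG *) { /* peering work */ }
  void start() {
    // was: with_pg(this->shard_services, pg);  -- member now removed
    with_pg(pg->get_shard_services(), pg);
  }
};

int main() {
  PG pg;
  LocalPeeringEvent evt{&pg};
  evt.start();
}
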
src/crimson/osd/osd_operations/peering_event.h
index 65b9e9e79633d5cc5bc4781ce5495dd836de731d..4ccbf3f27fa4afec98744da449a584ed95ba390a 100644
@@ -52,7 +52,6 @@ public:
 protected:
   PGPeeringPipeline &pp(PG &pg);
 
-  ShardServices &shard_services;
   PeeringCtx ctx;
   pg_shard_t from;
   spg_t pgid;
@@ -71,28 +70,28 @@ protected:
     return evt;
   }
 
-  virtual void on_pg_absent();
+  virtual void on_pg_absent(ShardServices &);
 
   virtual typename PeeringEvent::template interruptible_future<>
-  complete_rctx(Ref<PG>);
+  complete_rctx(ShardServices &, Ref<PG>);
 
-  virtual seastar::future<> complete_rctx_no_pg() { return seastar::now();}
+  virtual seastar::future<> complete_rctx_no_pg(
+    ShardServices &shard_services
+  ) { return seastar::now();}
 
 public:
   template <typename... Args>
   PeeringEvent(
-    ShardServices &shard_services, const pg_shard_t &from, const spg_t &pgid,
+    const pg_shard_t &from, const spg_t &pgid,
     Args&&... args) :
-    shard_services(shard_services),
     from(from),
     pgid(pgid),
     evt(std::forward<Args>(args)...)
   {}
   template <typename... Args>
   PeeringEvent(
-    ShardServices &shard_services, const pg_shard_t &from, const spg_t &pgid,
+    const pg_shard_t &from, const spg_t &pgid,
     float delay, Args&&... args) :
-    shard_services(shard_services),
     from(from),
     pgid(pgid),
     delay(delay),
@@ -111,9 +110,13 @@ protected:
   // must be after conn due to ConnectionPipeline's life-time
   PipelineHandle handle;
 
-  void on_pg_absent() final;
-  PeeringEvent::interruptible_future<> complete_rctx(Ref<PG> pg) override;
-  seastar::future<> complete_rctx_no_pg() override;
+  void on_pg_absent(ShardServices &) final;
+  PeeringEvent::interruptible_future<> complete_rctx(
+    ShardServices &shard_services,
+    Ref<PG> pg) override;
+  seastar::future<> complete_rctx_no_pg(
+    ShardServices &shard_services
+  ) override;
 
 public:
   class OSDPipeline {
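
The header change above follows a common virtual-hook pattern: the base class threads ShardServices& through its hooks, leaving the parameter unnamed where the default implementation ignores it, while RemotePeeringEvent overrides with the new signatures. A stub sketch of that pattern (not the real crimson declarations; the real hooks return seastar futures):

#include <iostream>

struct ShardServices {               // stub
  void dispatch_context_messages() { std::cout << "flushing messages\n"; }
};

struct PeeringEvent {
  virtual ~PeeringEvent() = default;
  // Default: nothing to do; the parameter is unnamed because it is unused.
  virtual void on_pg_absent(ShardServices &) {
    std::cout << "pg absent, dropping\n";
  }
  virtual void complete_rctx_no_pg(ShardServices &) {}
};

struct RemotePeeringEvent : PeeringEvent {
  void on_pg_absent(ShardServices &shard_services) final {
    (void)shard_services;            // the remote case may reply to the sender
    std::cout << "pg absent, may need to reply\n";
  }
  void complete_rctx_no_pg(ShardServices &shard_services) override {
    shard_services.dispatch_context_messages();
  }
};

int main() {
  ShardServices ss;
  RemotePeeringEvent e;
  PeeringEvent &base = e;
  base.on_pg_absent(ss);             // dispatches to the final override
  base.complete_rctx_no_pg(ss);
}
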
src/crimson/osd/pg.cc
index e3d70901580ec319d7ecb102c21f27dbd262bc28..33eeb4307865a65cc7dc2121cc1ab1ed65c5604f 100644
@@ -137,7 +137,6 @@ bool PG::try_flush_or_schedule_async() {
     [this, epoch=get_osdmap_epoch()]() {
       return shard_services.start_operation<LocalPeeringEvent>(
        this,
-       shard_services,
        pg_whoami,
        pgid,
        epoch,
@@ -176,7 +175,6 @@ void PG::queue_check_readable(epoch_t last_peering_reset, ceph::timespan delay)
   check_readable_timer.set_callback([last_peering_reset, this] {
     (void) shard_services.start_operation<LocalPeeringEvent>(
       this,
-      shard_services,
       pg_whoami,
       pgid,
       last_peering_reset,
@@ -266,7 +264,6 @@ void PG::on_activate_complete()
                   __func__);
     (void) shard_services.start_operation<LocalPeeringEvent>(
       this,
-      shard_services,
       pg_whoami,
       pgid,
       float(0.001),
@@ -278,7 +275,6 @@ void PG::on_activate_complete()
                   __func__);
     (void) shard_services.start_operation<LocalPeeringEvent>(
       this,
-      shard_services,
       pg_whoami,
       pgid,
       float(0.001),
@@ -290,7 +286,6 @@ void PG::on_activate_complete()
                   " for pg: {}", __func__, pgid);
     (void) shard_services.start_operation<LocalPeeringEvent>(
       this,
-      shard_services,
       pg_whoami,
       pgid,
       float(0.001),
@@ -397,7 +392,6 @@ void PG::schedule_renew_lease(epoch_t last_peering_reset, ceph::timespan delay)
   renew_lease_timer.set_callback([last_peering_reset, this] {
     (void) shard_services.start_operation<LocalPeeringEvent>(
       this,
-      shard_services,
       pg_whoami,
       pgid,
       last_peering_reset,
@@ -459,7 +453,6 @@ seastar::future<> PG::read_state(crimson::os::FuturizedStore* store)
     epoch_t epoch = get_osdmap_epoch();
     (void) shard_services.start_operation<LocalPeeringEvent>(
        this,
-       shard_services,
        pg_whoami,
        pgid,
        epoch,
src/crimson/osd/pg.h
index f1c474b0675b1f4ea95987f382eede10d7b05a3e..236a92b30405b404adcc037ff7c82d1376df6473 100644
@@ -203,7 +203,6 @@ public:
   void start_peering_event_operation(T &&evt, float delay = 0) {
     (void) shard_services.start_operation<LocalPeeringEvent>(
       this,
-      shard_services,
       pg_whoami,
       pgid,
       delay,
src/crimson/osd/pg_recovery.cc
index ccdd4980afb630c5fbe289cd4b254a4f7f418ca2..9264dda56e109e14202433a2a6255618899a3d1a 100644
@@ -73,7 +73,6 @@ PGRecovery::start_recovery_ops(
                       pg->get_pgid());
         (void) pg->get_shard_services().start_operation<LocalPeeringEvent>(
           static_cast<crimson::osd::PG*>(pg),
-          pg->get_shard_services(),
           pg->get_pg_whoami(),
           pg->get_pgid(),
           pg->get_osdmap_epoch(),
@@ -84,7 +83,6 @@ PGRecovery::start_recovery_ops(
                       pg->get_pgid());
         (void) pg->get_shard_services().start_operation<LocalPeeringEvent>(
           static_cast<crimson::osd::PG*>(pg),
-          pg->get_shard_services(),
           pg->get_pg_whoami(),
           pg->get_pgid(),
           pg->get_osdmap_epoch(),
@@ -534,7 +532,6 @@ void PGRecovery::backfilled()
   using LocalPeeringEvent = crimson::osd::LocalPeeringEvent;
   std::ignore = pg->get_shard_services().start_operation<LocalPeeringEvent>(
     static_cast<crimson::osd::PG*>(pg),
-    pg->get_shard_services(),
     pg->get_pg_whoami(),
     pg->get_pgid(),
     pg->get_osdmap_epoch(),
src/crimson/osd/recovery_backend.cc
index 040cf0d88b2b676b20c385a0f55151b87cc19d7e..24d7d00477d6fe5573a37545d25df319b9898cda 100644
@@ -81,7 +81,6 @@ void RecoveryBackend::handle_backfill_finish(
   std::ignore = m.get_connection()->send(std::move(reply));
   shard_services.start_operation<crimson::osd::LocalPeeringEvent>(
     static_cast<crimson::osd::PG*>(&pg),
-    shard_services,
     pg.get_pg_whoami(),
     pg.get_pgid(),
     pg.get_osdmap_epoch(),
@@ -226,7 +225,6 @@ RecoveryBackend::handle_scan_get_digest(
     std::ignore = shard_services.start_operation<crimson::osd::LocalPeeringEvent>(
       // TODO: abstract start_background_recovery
       static_cast<crimson::osd::PG*>(&pg),
-      shard_services,
       pg.get_pg_whoami(),
       pg.get_pgid(),
       pg.get_osdmap_epoch(),
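
Finally, the reason every one of these call sites had to change in lockstep: start_operation-style helpers perfectly forward their arguments into the event constructor, so the constructor's parameter list is effectively the call-site signature. A stub sketch of that forwarding shape, including the pair-returning .second usage seen in osd.cc above (all names here are stand-ins, not the real crimson API):

#include <memory>
#include <utility>

struct Operation { virtual ~Operation() = default; };
using OperationRef = std::shared_ptr<Operation>;
struct future {};                       // stand-in for seastar::future<>

// Forwards Args... straight into OpT's constructor; returns the op plus a
// completion future, mimicking the start_pg_operation<...>(...).second shape.
template <typename OpT, typename... Args>
std::pair<OperationRef, future> start_operation(Args&&... args) {
  auto op = std::make_shared<OpT>(std::forward<Args>(args)...);
  return {op, future{}};
}

struct pg_shard_t {};
struct spg_t {};

struct RemotePeeringEvent : Operation {
  // After this commit: no ShardServices& parameter here.
  RemotePeeringEvent(pg_shard_t /*from*/, spg_t /*pgid*/, int /*epoch*/) {}
};

int main() {
  // Mirrors OSD::handle_scrub: keep only the completion future...
  future f = start_operation<RemotePeeringEvent>(pg_shard_t{}, spg_t{}, 1).second;
  // ...or fire-and-forget, as in OSD::handle_peering_op.
  (void) start_operation<RemotePeeringEvent>(pg_shard_t{}, spg_t{}, 2);
  (void) f;
}
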