git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
crimson/osd/osd_operations: consistent naming to pipeline users
author Matan Breizman <mbreizma@redhat.com>
Wed, 28 Jun 2023 09:07:26 +0000 (09:07 +0000)
committer Matan Breizman <mbreizma@redhat.com>
Tue, 17 Oct 2023 16:20:45 +0000 (16:20 +0000)
Rename pp/bp to client_pp or peering_pp.

Signed-off-by: Matan Breizman <mbreizma@redhat.com>
(cherry picked from commit 9f3dc8db7195b6a42499b46c7dbea872d30788b9)

16 files changed:
src/crimson/osd/osd_operations/background_recovery.cc
src/crimson/osd/osd_operations/background_recovery.h
src/crimson/osd/osd_operations/client_request.cc
src/crimson/osd/osd_operations/client_request.h
src/crimson/osd/osd_operations/internal_client_request.cc
src/crimson/osd/osd_operations/internal_client_request.h
src/crimson/osd/osd_operations/logmissing_request.cc
src/crimson/osd/osd_operations/logmissing_request.h
src/crimson/osd/osd_operations/logmissing_request_reply.cc
src/crimson/osd/osd_operations/logmissing_request_reply.h
src/crimson/osd/osd_operations/peering_event.cc
src/crimson/osd/osd_operations/peering_event.h
src/crimson/osd/osd_operations/replicated_request.cc
src/crimson/osd/osd_operations/replicated_request.h
src/crimson/osd/osd_operations/snaptrim_event.cc
src/crimson/osd/osd_operations/snaptrim_event.h

index 41ad87a32bf24a37a81a2caf8342f6d973b9a8da..953ec9595dae782ed6859e16b4be5b9cc2761e9f 100644 (file)
@@ -172,7 +172,7 @@ PglogBasedRecovery::do_recovery()
   });
 }
 
-PGPeeringPipeline &BackfillRecovery::bp(PG &pg)
+PGPeeringPipeline &BackfillRecovery::peering_pp(PG &pg)
 {
   return pg.peering_request_pg_pipeline;
 }
@@ -193,7 +193,7 @@ BackfillRecovery::do_recovery()
     // with the backfill_pipeline we protect it from a second entry from
     // the implementation of BackfillListener.
     // additionally, this stage serves to synchronize with PeeringEvent.
-    bp(*pg).process
+    peering_pp(*pg).process
   ).then_interruptible([this] {
     pg->get_recovery_handler()->dispatch_backfill_event(std::move(evt));
     return seastar::make_ready_future<bool>(false);
index 4a1ea1900c65fa60d33f7c110b7aa58bac502545..17f2cd57a305af359b27ac893c7d2a5d48481c0c 100644 (file)
@@ -116,7 +116,7 @@ private:
   boost::intrusive_ptr<const boost::statechart::event_base> evt;
   PipelineHandle handle;
 
-  static PGPeeringPipeline &bp(PG &pg);
+  static PGPeeringPipeline &peering_pp(PG &pg);
   interruptible_future<bool> do_recovery() override;
 };
 
index c46c419c7b9081aad4ad1d51eaad2b27a4575f17..9374fbde2cc06446f29324e8cb3533eceeec9d77 100644 (file)
@@ -81,7 +81,7 @@ ConnectionPipeline &ClientRequest::get_connection_pipeline()
   return get_osd_priv(conn.get()).client_request_conn_pipeline;
 }
 
-ClientRequest::PGPipeline &ClientRequest::pp(PG &pg)
+ClientRequest::PGPipeline &ClientRequest::client_pp(PG &pg)
 {
   return pg.request_pg_pipeline;
 }
@@ -118,7 +118,7 @@ seastar::future<> ClientRequest::with_pg_int(
          return interruptor::now();
        });
       }
-      return ihref.enter_stage<interruptor>(pp(pg).await_map, *this
+      return ihref.enter_stage<interruptor>(client_pp(pg).await_map, *this
       ).then_interruptible([this, this_instance_id, &pg, &ihref] {
        logger().debug("{}.{}: after await_map stage", *this, this_instance_id);
        return ihref.enter_blocker(
@@ -126,7 +126,7 @@ seastar::future<> ClientRequest::with_pg_int(
          m->get_min_epoch(), nullptr);
       }).then_interruptible([this, this_instance_id, &pg, &ihref](auto map) {
        logger().debug("{}.{}: after wait_for_map", *this, this_instance_id);
-       return ihref.enter_stage<interruptor>(pp(pg).wait_for_active, *this);
+       return ihref.enter_stage<interruptor>(client_pp(pg).wait_for_active, *this);
       }).then_interruptible([this, this_instance_id, &pg, &ihref]() {
        logger().debug(
          "{}.{}: after wait_for_active stage", *this, this_instance_id);
@@ -197,7 +197,7 @@ ClientRequest::interruptible_future<>
 ClientRequest::process_op(instance_handle_t &ihref, Ref<PG> &pg)
 {
   return ihref.enter_stage<interruptor>(
-    pp(*pg).recover_missing,
+    client_pp(*pg).recover_missing,
     *this
   ).then_interruptible(
     [this, pg]() mutable {
@@ -219,7 +219,7 @@ ClientRequest::process_op(instance_handle_t &ihref, Ref<PG> &pg)
        reply->set_reply_versions(completed->version, completed->user_version);
         return conn->send(std::move(reply));
       } else {
-        return ihref.enter_stage<interruptor>(pp(*pg).get_obc, *this
+        return ihref.enter_stage<interruptor>(client_pp(*pg).get_obc, *this
        ).then_interruptible(
           [this, pg, &ihref]() mutable -> PG::load_obc_iertr::future<> {
           logger().debug("{}: in get_obc stage", *this);
@@ -229,7 +229,7 @@ ClientRequest::process_op(instance_handle_t &ihref, Ref<PG> &pg)
             [this, pg, &ihref](auto obc) mutable {
               logger().debug("{}: got obc {}", *this, obc->obs);
               return ihref.enter_stage<interruptor>(
-                pp(*pg).process, *this
+                client_pp(*pg).process, *this
               ).then_interruptible([this, pg, obc, &ihref]() mutable {
                 return do_process(ihref, pg, obc);
               });
@@ -314,12 +314,12 @@ ClientRequest::do_process(
   return pg->do_osd_ops(m, conn, obc, op_info, snapc).safe_then_unpack_interruptible(
     [this, pg, &ihref](auto submitted, auto all_completed) mutable {
       return submitted.then_interruptible([this, pg, &ihref] {
-       return ihref.enter_stage<interruptor>(pp(*pg).wait_repop, *this);
+       return ihref.enter_stage<interruptor>(client_pp(*pg).wait_repop, *this);
       }).then_interruptible(
        [this, pg, all_completed=std::move(all_completed), &ihref]() mutable {
          return all_completed.safe_then_interruptible(
            [this, pg, &ihref](MURef<MOSDOpReply> reply) {
-             return ihref.enter_stage<interruptor>(pp(*pg).send_reply, *this
+             return ihref.enter_stage<interruptor>(client_pp(*pg).send_reply, *this
              ).then_interruptible(
                [this, reply=std::move(reply)]() mutable {
                  logger().debug("{}: sending response", *this);
index 4338ac4169759120a5555afe1c0318834f7fc86d..b2dce1e873e1839c811711a1c8657116a03c6729 100644 (file)
@@ -244,7 +244,7 @@ private:
       Ref<PG> &pg);
   bool is_pg_op() const;
 
-  PGPipeline &pp(PG &pg);
+  PGPipeline &client_pp(PG &pg);
 
   template <typename Errorator>
   using interruptible_errorator =
index e71804d88ea718f58ff48cf0b1230ff55b376bf4..1e9b842b2ec7a61444d933b56b60009e2da1112c 100644 (file)
@@ -43,7 +43,7 @@ void InternalClientRequest::dump_detail(Formatter *f) const
 {
 }
 
-CommonPGPipeline& InternalClientRequest::pp()
+CommonPGPipeline& InternalClientRequest::client_pp()
 {
   return pg->request_pg_pipeline;
 }
@@ -56,7 +56,7 @@ seastar::future<> InternalClientRequest::start()
       logger().debug("{}: in repeat", *this);
       return interruptor::with_interruption([this]() mutable {
         return enter_stage<interruptor>(
-         pp().wait_for_active
+         client_pp().wait_for_active
         ).then_interruptible([this] {
           return with_blocking_event<PGActivationBlocker::BlockingEvent,
                                     interruptor>([this] (auto&& trigger) {
@@ -64,12 +64,12 @@ seastar::future<> InternalClientRequest::start()
           });
         }).then_interruptible([this] {
           return enter_stage<interruptor>(
-            pp().recover_missing);
+            client_pp().recover_missing);
         }).then_interruptible([this] {
           return do_recover_missing(pg, get_target_oid());
         }).then_interruptible([this] {
           return enter_stage<interruptor>(
-            pp().get_obc);
+            client_pp().get_obc);
         }).then_interruptible([this] () -> PG::load_obc_iertr::future<> {
           logger().debug("{}: getting obc lock", *this);
           return seastar::do_with(create_osd_ops(),
@@ -81,7 +81,8 @@ seastar::future<> InternalClientRequest::start()
             assert(ret == 0);
             return pg->with_locked_obc(get_target_oid(), op_info,
               [&osd_ops, this](auto obc) {
-              return enter_stage<interruptor>(pp().process).then_interruptible(
+              return enter_stage<interruptor>(client_pp().process
+              ).then_interruptible(
                 [obc=std::move(obc), &osd_ops, this] {
                 return pg->do_osd_ops(
                   std::move(obc),
index ca78905ea96078a6ce0199e18478a4d9c38c3ffd..8eed12e050e19fa6b212f1859c55bdbb43b4fe0a 100644 (file)
@@ -39,7 +39,7 @@ private:
   void print(std::ostream &) const final;
   void dump_detail(Formatter *f) const final;
 
-  CommonPGPipeline& pp();
+  CommonPGPipeline& client_pp();
 
   seastar::future<> do_process();
 
index 5dfb290f945c3fccdecb39c9ed2ed02d8cfd028c..630869d5d884fb65dbe83cfa239d407a96839634 100644 (file)
@@ -49,7 +49,7 @@ ConnectionPipeline &LogMissingRequest::get_connection_pipeline()
   return get_osd_priv(conn.get()).replicated_request_conn_pipeline;
 }
 
-ClientRequest::PGPipeline &LogMissingRequest::pp(PG &pg)
+ClientRequest::PGPipeline &LogMissingRequest::client_pp(PG &pg)
 {
   return pg.request_pg_pipeline;
 }
index 4ab87996f3afbf68f37b7b6d6a0031ab045cfeb8..ebfc520ede713a7ad4654a0e170d07cfe81e9ae5 100644 (file)
@@ -64,7 +64,7 @@ public:
   > tracking_events;
 
 private:
-  ClientRequest::PGPipeline &pp(PG &pg);
+  ClientRequest::PGPipeline &client_pp(PG &pg);
 
   crimson::net::ConnectionRef conn;
   // must be after `conn` to ensure the ConnectionPipeline's is alive
index 95a968c1455d2d737df454372adcdc1012517de2..b4bf2938e05bca0e1b93255c1d8596df982dd07a 100644 (file)
@@ -49,7 +49,7 @@ ConnectionPipeline &LogMissingRequestReply::get_connection_pipeline()
   return get_osd_priv(conn.get()).replicated_request_conn_pipeline;
 }
 
-ClientRequest::PGPipeline &LogMissingRequestReply::pp(PG &pg)
+ClientRequest::PGPipeline &LogMissingRequestReply::client_pp(PG &pg)
 {
   return pg.request_pg_pipeline;
 }
index cb39e9f6c2b42eca1a360c9ce9bf6766270b6576..c89131fec1d7deeb80b6f5ad8dabc1dea4bedfce 100644 (file)
@@ -64,7 +64,7 @@ public:
   > tracking_events;
 
 private:
-  ClientRequest::PGPipeline &pp(PG &pg);
+  ClientRequest::PGPipeline &client_pp(PG &pg);
 
   crimson::net::ConnectionRef conn;
   // must be after `conn` to ensure the ConnectionPipeline's is alive
index b323b4a817bf29262c75b38001bb154cd8d6976a..ea4662bd01e0b72b9680bd8d1f4eac2584966569 100644 (file)
@@ -54,7 +54,7 @@ void PeeringEvent<T>::dump_detail(Formatter *f) const
 
 
 template <class T>
-PGPeeringPipeline &PeeringEvent<T>::pp(PG &pg)
+PGPeeringPipeline &PeeringEvent<T>::peering_pp(PG &pg)
 {
   return pg.peering_request_pg_pipeline;
 }
@@ -73,7 +73,7 @@ seastar::future<> PeeringEvent<T>::with_pg(
   using interruptor = typename T::interruptor;
   return interruptor::with_interruption([this, pg, &shard_services] {
     logger().debug("{}: pg present", *this);
-    return this->template enter_stage<interruptor>(pp(*pg).await_map
+    return this->template enter_stage<interruptor>(peering_pp(*pg).await_map
     ).then_interruptible([this, pg] {
       return this->template with_blocking_event<
        PG_OSDMapGate::OSDMapBlocker::BlockingEvent
@@ -82,7 +82,7 @@ seastar::future<> PeeringEvent<T>::with_pg(
            std::move(trigger), evt.get_epoch_sent());
        });
     }).then_interruptible([this, pg](auto) {
-      return this->template enter_stage<interruptor>(pp(*pg).process);
+      return this->template enter_stage<interruptor>(peering_pp(*pg).process);
     }).then_interruptible([this, pg, &shard_services] {
       return pg->do_peering_event(evt, ctx
       ).then_interruptible([this, pg, &shard_services] {
index d9c9da58a17fcc088d9579ab784c2717830f0976..e94caead199256ac543039fc656f522271bb283c 100644 (file)
@@ -51,7 +51,7 @@ public:
   static constexpr OperationTypeCode type = OperationTypeCode::peering_event;
 
 protected:
-  PGPeeringPipeline &pp(PG &pg);
+  PGPeeringPipeline &peering_pp(PG &pg);
 
   PeeringCtx ctx;
   pg_shard_t from;
index f7d4fa68b2b30443836c191c9d2665bbdb980321..f0444a7c432229a8bcc666d71667a2025a63cc97 100644 (file)
@@ -49,7 +49,7 @@ ConnectionPipeline &RepRequest::get_connection_pipeline()
   return get_osd_priv(conn.get()).replicated_request_conn_pipeline;
 }
 
-ClientRequest::PGPipeline &RepRequest::pp(PG &pg)
+ClientRequest::PGPipeline &RepRequest::client_pp(PG &pg)
 {
   return pg.request_pg_pipeline;
 }
@@ -61,7 +61,7 @@ seastar::future<> RepRequest::with_pg(
   IRef ref = this;
   return interruptor::with_interruption([this, pg] {
     logger().debug("{}: pg present", *this);
-    return this->template enter_stage<interruptor>(pp(*pg).await_map
+    return this->template enter_stage<interruptor>(client_pp(*pg).await_map
     ).then_interruptible([this, pg] {
       return this->template with_blocking_event<
         PG_OSDMapGate::OSDMapBlocker::BlockingEvent
index 78d97ecf439fa0d85cd3f332ec39ed0fd64df109..c742888d9390a5c58012f389db8bb8eba6379b50 100644 (file)
@@ -66,7 +66,7 @@ public:
   > tracking_events;
 
 private:
-  ClientRequest::PGPipeline &pp(PG &pg);
+  ClientRequest::PGPipeline &client_pp(PG &pg);
 
   crimson::net::ConnectionRef conn;
   PipelineHandle handle;
index 4a32b567cd4680c1d7feb0408d3fd9e3d492860c..e4a1b04df142fa39da427734bc28a319a021c14f 100644 (file)
@@ -92,7 +92,7 @@ SnapTrimEvent::start()
   });
 }
 
-CommonPGPipeline& SnapTrimEvent::pp()
+CommonPGPipeline& SnapTrimEvent::client_pp()
 {
   return pg->request_pg_pipeline;
 }
@@ -103,7 +103,7 @@ SnapTrimEvent::with_pg(
 {
   return interruptor::with_interruption([&shard_services, this] {
     return enter_stage<interruptor>(
-      pp().wait_for_active
+      client_pp().wait_for_active
     ).then_interruptible([this] {
       return with_blocking_event<PGActivationBlocker::BlockingEvent,
                                  interruptor>([this] (auto&& trigger) {
@@ -111,18 +111,18 @@ SnapTrimEvent::with_pg(
       });
     }).then_interruptible([this] {
       return enter_stage<interruptor>(
-        pp().recover_missing);
+        client_pp().recover_missing);
     }).then_interruptible([] {
       //return do_recover_missing(pg, get_target_oid());
       return seastar::now();
     }).then_interruptible([this] {
       return enter_stage<interruptor>(
-        pp().get_obc);
+        client_pp().get_obc);
     }).then_interruptible([this] {
       return pg->snaptrim_mutex.lock(*this);
     }).then_interruptible([this] {
       return enter_stage<interruptor>(
-        pp().process);
+        client_pp().process);
     }).then_interruptible([&shard_services, this] {
       return interruptor::async([this] {
         std::vector<hobject_t> to_trim;
@@ -207,7 +207,7 @@ SnapTrimEvent::with_pg(
 }
 
 
-CommonPGPipeline& SnapTrimObjSubEvent::pp()
+CommonPGPipeline& SnapTrimObjSubEvent::client_pp()
 {
   return pg->request_pg_pipeline;
 }
@@ -497,7 +497,7 @@ SnapTrimObjSubEvent::with_pg(
   ShardServices &shard_services, Ref<PG> _pg)
 {
   return enter_stage<interruptor>(
-    pp().wait_for_active
+    client_pp().wait_for_active
   ).then_interruptible([this] {
     return with_blocking_event<PGActivationBlocker::BlockingEvent,
                                interruptor>([this] (auto&& trigger) {
@@ -505,13 +505,13 @@ SnapTrimObjSubEvent::with_pg(
     });
   }).then_interruptible([this] {
     return enter_stage<interruptor>(
-      pp().recover_missing);
+      client_pp().recover_missing);
   }).then_interruptible([] {
     //return do_recover_missing(pg, get_target_oid());
     return seastar::now();
   }).then_interruptible([this] {
     return enter_stage<interruptor>(
-      pp().get_obc);
+      client_pp().get_obc);
   }).then_interruptible([this] {
     logger().debug("{}: getting obc for {}", *this, coid);
     // end of commonality
@@ -521,7 +521,7 @@ SnapTrimObjSubEvent::with_pg(
       [this](auto head_obc, auto clone_obc) {
       logger().debug("{}: got clone_obc={}", *this, clone_obc->get_oid());
       return enter_stage<interruptor>(
-        pp().process
+        client_pp().process
       ).then_interruptible(
         [this,clone_obc=std::move(clone_obc), head_obc=std::move(head_obc)]() mutable {
         logger().debug("{}: processing clone_obc={}", *this, clone_obc->get_oid());
index 851b7b0f0b3865e2d58d04f4d23897c3471343af..a3a970a04c7d017a5f65ae5b1743ea96ac29bdf3 100644 (file)
@@ -57,7 +57,7 @@ public:
     ShardServices &shard_services, Ref<PG> pg);
 
 private:
-  CommonPGPipeline& pp();
+  CommonPGPipeline& client_pp();
 
   // bases on 998cb8c141bb89aafae298a9d5e130fbd78fe5f2
   struct SubOpBlocker : crimson::BlockerT<SubOpBlocker> {
@@ -143,7 +143,7 @@ public:
   remove_or_update_iertr::future<> with_pg(
     ShardServices &shard_services, Ref<PG> pg);
 
-  CommonPGPipeline& pp();
+  CommonPGPipeline& client_pp();
 
 private:
   object_stat_sum_t delta_stats;