crimson/osd/osd_operations/client_request: cleanup shard_services
author     Yingxin Cheng <yingxin.cheng@intel.com>
           Mon, 8 Jan 2024 06:25:20 +0000 (14:25 +0800)
committer  Yingxin Cheng <yingxin.cheng@intel.com>
           Mon, 22 Jan 2024 08:43:49 +0000 (16:43 +0800)
Signed-off-by: Yingxin Cheng <yingxin.cheng@intel.com>
src/crimson/osd/osd_operations/client_request.cc
src/crimson/osd/osd_operations/client_request.h
src/crimson/osd/pg.cc

diff --git a/src/crimson/osd/osd_operations/client_request.cc b/src/crimson/osd/osd_operations/client_request.cc
index 0936889e37e3054f3b6db6b8037cdaf970bd4095..1611531fcd0b974236668ede1c062db8e483b546 100644
--- a/src/crimson/osd/osd_operations/client_request.cc
+++ b/src/crimson/osd/osd_operations/client_request.cc
@@ -19,14 +19,13 @@ SET_SUBSYS(osd);
 namespace crimson::osd {
 
 
-void ClientRequest::Orderer::requeue(
-  ShardServices &shard_services, Ref<PG> pg)
+void ClientRequest::Orderer::requeue(Ref<PG> pg)
 {
   LOG_PREFIX(ClientRequest::Orderer::requeue);
   for (auto &req: list) {
     DEBUGDPP("requeueing {}", *pg, req);
     req.reset_instance_handle();
-    std::ignore = req.with_pg_int(shard_services, pg);
+    std::ignore = req.with_pg_int(pg);
   }
 }
 
@@ -47,9 +46,9 @@ void ClientRequest::complete_request()
 }
 
 ClientRequest::ClientRequest(
-  ShardServices &shard_services, crimson::net::ConnectionRef conn,
+  ShardServices &_shard_services, crimson::net::ConnectionRef conn,
   Ref<MOSDOp> &&m)
-  : put_historic_shard_services(&shard_services),
+  : shard_services(&_shard_services),
     conn(std::move(conn)),
     m(std::move(m)),
     instance_handle(new instance_handle_t)
@@ -98,9 +97,10 @@ bool ClientRequest::is_pg_op() const
     [](auto& op) { return ceph_osd_op_type_pg(op.op.op); });
 }
 
-seastar::future<> ClientRequest::with_pg_int(
-  ShardServices &shard_services, Ref<PG> pgref)
+seastar::future<> ClientRequest::with_pg_int(Ref<PG> pgref)
 {
+  ceph_assert_always(shard_services);
+
   LOG_PREFIX(ClientRequest::with_pg_int);
   epoch_t same_interval_since = pgref->get_interval_start_epoch();
   DEBUGDPP("{}: same_interval_since: {}", *pgref, *this, same_interval_since);
@@ -112,11 +112,11 @@ seastar::future<> ClientRequest::with_pg_int(
   auto instance_handle = get_instance_handle();
   auto &ihref = *instance_handle;
   return interruptor::with_interruption(
-    [FNAME, this, pgref, this_instance_id, &ihref, &shard_services]() mutable {
+    [FNAME, this, pgref, this_instance_id, &ihref]() mutable {
       DEBUGDPP("{} start", *pgref, *this);
       PG &pg = *pgref;
       if (pg.can_discard_op(*m)) {
-       return shard_services.send_incremental_map(
+       return shard_services->send_incremental_map(
          std::ref(*conn), m->get_map_epoch()
        ).then([FNAME, this, this_instance_id, pgref] {
          DEBUGDPP("{}: discarding {}", *pgref, *this, this_instance_id);
@@ -179,14 +179,12 @@ seastar::future<> ClientRequest::with_pg_int(
 }
 
 seastar::future<> ClientRequest::with_pg(
-  ShardServices &shard_services, Ref<PG> pgref)
+  ShardServices &_shard_services, Ref<PG> pgref)
 {
-  put_historic_shard_services = &shard_services;
+  shard_services = &_shard_services;
   pgref->client_request_orderer.add_request(*this);
   auto ret = on_complete.get_future();
-  std::ignore = with_pg_int(
-    shard_services, std::move(pgref)
-  );
+  std::ignore = with_pg_int(std::move(pgref));
   return ret;
 }
 
@@ -417,8 +415,8 @@ bool ClientRequest::is_misdirected(const PG& pg) const
 
 void ClientRequest::put_historic() const
 {
-  ceph_assert_always(put_historic_shard_services);
-  put_historic_shard_services->get_registry().put_historic(*this);
+  ceph_assert_always(shard_services);
+  shard_services->get_registry().put_historic(*this);
 }
 
 const SnapContext ClientRequest::get_snapc(
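
The .cc changes above collapse two roles into a single pointer: shard_services is set to the primary core's services at construction, re-pointed at the PG core's services in with_pg(), and asserted non-null at every dereference, so requeue() and with_pg_int() no longer need a ShardServices& threaded through them. A minimal standalone sketch of that lifecycle, with stub types standing in for the real crimson::osd classes (ShardServices, PG, and the method signatures here are simplified assumptions, not the actual interfaces):

#include <cassert>
#include <memory>
#include <utility>

struct ShardServices {};  // stub for crimson::osd::ShardServices
struct PG {};             // stub; the real PG is far richer

class ClientRequest {
  // Initially set to primary core, updated to pg core after with_pg()
  ShardServices *shard_services = nullptr;

public:
  explicit ClientRequest(ShardServices &primary)
    : shard_services(&primary) {}

  // Re-point at the PG core's services, then start the work; callers no
  // longer pass ShardServices& down to with_pg_int().
  void with_pg(ShardServices &pg_core, std::shared_ptr<PG> pg) {
    shard_services = &pg_core;
    with_pg_int(std::move(pg));
  }

  void with_pg_int(std::shared_ptr<PG> /*pg*/) {
    assert(shard_services);  // mirrors ceph_assert_always(shard_services)
    // ... dispatch through shard_services-> ...
  }

  void put_historic() const {
    assert(shard_services);  // the same member now serves put_historic()
    // shard_services->get_registry().put_historic(*this);
  }
};
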
diff --git a/src/crimson/osd/osd_operations/client_request.h b/src/crimson/osd/osd_operations/client_request.h
index 80645885a141287ac2a979601b4ef72fc4e5b060..b374aacbbe940fa2f86603327f1f98b37a18e84d 100644
--- a/src/crimson/osd/osd_operations/client_request.h
+++ b/src/crimson/osd/osd_operations/client_request.h
@@ -29,9 +29,8 @@ class ShardServices;
 
 class ClientRequest final : public PhasedOperationT<ClientRequest>,
                             private CommonClientRequest {
-  // Initially set to primary core, updated to pg core after move,
-  // used by put_historic
-  ShardServices *put_historic_shard_services = nullptr;
+  // Initially set to primary core, updated to pg core after with_pg()
+  ShardServices *shard_services = nullptr;
 
   crimson::net::ConnectionRef conn;
   // must be after conn due to ConnectionPipeline's life-time
@@ -199,7 +198,7 @@ public:
       list.erase(list_t::s_iterator_to(request));
       intrusive_ptr_release(&request);
     }
-    void requeue(ShardServices &shard_services, Ref<PG> pg);
+    void requeue(Ref<PG> pg);
     void clear_and_cancel(PG &pg);
   };
   void complete_request();
@@ -243,8 +242,7 @@ public:
     conn = make_local_shared_foreign(std::move(_conn));
   }
 
-  seastar::future<> with_pg_int(
-    ShardServices &shard_services, Ref<PG> pg);
+  seastar::future<> with_pg_int(Ref<PG> pg);
 
 public:
   seastar::future<> with_pg(
diff --git a/src/crimson/osd/pg.cc b/src/crimson/osd/pg.cc
index 0e16f6f118ba7cee617af660d1d8db1b097f2d88..7638b2fcdbeb2c75f771639a8eb673cf9e9e1777 100644
--- a/src/crimson/osd/pg.cc
+++ b/src/crimson/osd/pg.cc
@@ -1523,7 +1523,7 @@ void PG::on_change(ceph::os::Transaction &t) {
   wait_for_active_blocker.unblock();
   if (is_primary()) {
     logger().debug("{} {}: requeueing", *this, __func__);
-    client_request_orderer.requeue(shard_services, this);
+    client_request_orderer.requeue(this);
   } else {
     logger().debug("{} {}: dropping requests", *this, __func__);
     client_request_orderer.clear_and_cancel(*this);
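
Correspondingly, PG::on_change() can requeue without knowing about ShardServices at all: each queued request dereferences the pointer it stored in with_pg(). A hedged sketch of the orderer side under the same kind of stub types (Request and std::list stand in for the real intrusive list of ClientRequests):

#include <list>
#include <memory>

struct PG {};  // stub so the sketch stands alone

struct Request {
  void reset_instance_handle() {}                  // stand-in
  void with_pg_int(std::shared_ptr<PG> /*pg*/) {}  // uses its stored shard_services
};

struct Orderer {
  std::list<Request> list;

  // No ShardServices& parameter any more.
  void requeue(std::shared_ptr<PG> pg) {
    for (auto &req : list) {
      req.reset_instance_handle();
      req.with_pg_int(pg);  // the real code does std::ignore = req.with_pg_int(pg)
    }
  }
};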