git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
osd: ReadPipeline doesn't depend on classical ECBackend at all
author: Radosław Zarzyński <rzarzyns@redhat.com>
Tue, 19 Sep 2023 12:51:53 +0000 (14:51 +0200)
committer: Radoslaw Zarzynski <rzarzyns@redhat.com>
Wed, 10 Jan 2024 17:27:12 +0000 (17:27 +0000)
Signed-off-by: Radosław Zarzyński <rzarzyns@redhat.com>
src/osd/ECBackend.cc
src/osd/ECBackend.h

index a19e95cba9d1c30727133c4d765ace87aefbb56a..6750a8a538d8080f493726e96ffa0bd99c48aa6b 100644 (file)
@@ -634,7 +634,7 @@ void ECBackend::continue_recovery_op(
       }
 
       map<pg_shard_t, vector<pair<int, int>>> to_read;
-      int r = get_min_avail_to_read_shards(
+      int r = read_pipeline.get_min_avail_to_read_shards(
        op.hoid, want, true, false, &to_read);
       if (r != 0) {
        // we must have lost a recovery source
@@ -1728,7 +1728,7 @@ void ECCommon::ReadPipeline::get_all_avail_shards(
   }
 }
 
-int ECBackend::get_min_avail_to_read_shards(
+int ECCommon::ReadPipeline::get_min_avail_to_read_shards(
   const hobject_t &hoid,
   const set<int> &want,
   bool for_recovery,
@@ -1742,7 +1742,7 @@ int ECBackend::get_min_avail_to_read_shards(
   map<shard_id_t, pg_shard_t> shards;
   set<pg_shard_t> error_shards;
 
-  read_pipeline.get_all_avail_shards(hoid, error_shards, have, shards, for_recovery);
+  get_all_avail_shards(hoid, error_shards, have, shards, for_recovery);
 
   map<int, vector<pair<int, int>>> need;
   int r = ec_impl->minimum_to_decode(want, have, &need);
@@ -2478,11 +2478,20 @@ void ECBackend::objects_read_and_reconstruct(
   GenContextURef<map<hobject_t,pair<int, extent_map> > &&> &&func)
 {
   return read_pipeline.objects_read_and_reconstruct(
-    *this, reads, fast_read, std::move(func));
+    reads, fast_read, std::move(func));
+}
+
+void ECCommon::ReadPipeline::get_want_to_read_shards(
+  std::set<int> *want_to_read) const
+{
+  const std::vector<int> &chunk_mapping = ec_impl->get_chunk_mapping();
+  for (int i = 0; i < (int)ec_impl->get_data_chunk_count(); ++i) {
+    int chunk = (int)chunk_mapping.size() > i ? chunk_mapping[i] : i;
+    want_to_read->insert(chunk);
+  }
 }
 
 void ECCommon::ReadPipeline::objects_read_and_reconstruct(
-  ECBackend& ec_backend,
   const map<hobject_t,
     std::list<boost::tuple<uint64_t, uint64_t, uint32_t> >
   > &reads,
@@ -2498,12 +2507,12 @@ void ECCommon::ReadPipeline::objects_read_and_reconstruct(
 
   map<hobject_t, set<int>> obj_want_to_read;
   set<int> want_to_read;
-  ec_backend.get_want_to_read_shards(&want_to_read);
+  get_want_to_read_shards(&want_to_read);
     
   map<hobject_t, read_request_t> for_read_op;
   for (auto &&to_read: reads) {
     map<pg_shard_t, vector<pair<int, int>>> shards;
-    int r = ec_backend.get_min_avail_to_read_shards(
+    int r = get_min_avail_to_read_shards(
       to_read.first,
       want_to_read,
       false,
index 30b6f017f96817f0dff78c8e95689d802fb7bc95..ae169fd8ff34f578cb7e9f6c340799564c42435d 100644 (file)
@@ -90,12 +90,12 @@ struct RecoveryMessages;
     virtual spg_t primary_spg_t() const = 0;
     virtual const PGLog &get_log() const = 0;
     virtual DoutPrefixProvider *get_dpp() = 0;
+    // XXX
     virtual void apply_stats(
        const hobject_t &soid,
        const object_stat_sum_t &delta_stats) = 0;
   };
 
-struct ECBackend;
 struct ECCommon {
   struct read_request_t {
     const std::list<boost::tuple<uint64_t, uint64_t, uint32_t> > to_read;
@@ -237,7 +237,6 @@ struct ECCommon {
   };
   struct ReadPipeline {
     void objects_read_and_reconstruct(
-      ECBackend& ecbackend,
       const std::map<hobject_t, std::list<boost::tuple<uint64_t, uint64_t, uint32_t> >
       > &reads,
       bool fast_read,
@@ -315,6 +314,17 @@ struct ECCommon {
 
     friend ostream &operator<<(ostream &lhs, const ReadOp &rhs);
     friend struct FinishReadOp;
+
+    void get_want_to_read_shards(std::set<int> *want_to_read) const;
+
+    /// Returns to_read replicas sufficient to reconstruct want
+    int get_min_avail_to_read_shards(
+      const hobject_t &hoid,     ///< [in] object
+      const std::set<int> &want,      ///< [in] desired shards
+      bool for_recovery,         ///< [in] true if we may use non-acting replicas
+      bool do_redundant_reads,   ///< [in] true if we want to issue redundant reads to reduce latency
+      std::map<pg_shard_t, std::vector<std::pair<int, int>>> *to_read   ///< [out] shards, corresponding subchunks to read
+      ); ///< @return error code, 0 on success
   };
 };
 
@@ -445,16 +455,6 @@ private:
                        sinfo.get_stripe_width());
   }
 
-public:
-  void get_want_to_read_shards(std::set<int> *want_to_read) const {
-    const std::vector<int> &chunk_mapping = ec_impl->get_chunk_mapping();
-    for (int i = 0; i < (int)ec_impl->get_data_chunk_count(); ++i) {
-      int chunk = (int)chunk_mapping.size() > i ? chunk_mapping[i] : i;
-      want_to_read->insert(chunk);
-    }
-  }
-private:
-
   /**
    * Recovery
    *
@@ -831,15 +831,6 @@ public:
     ceph::ErasureCodeInterfaceRef ec_impl,
     uint64_t stripe_width);
 
-  /// Returns to_read replicas sufficient to reconstruct want
-  int get_min_avail_to_read_shards(
-    const hobject_t &hoid,     ///< [in] object
-    const std::set<int> &want,      ///< [in] desired shards
-    bool for_recovery,         ///< [in] true if we may use non-acting replicas
-    bool do_redundant_reads,   ///< [in] true if we want to issue redundant reads to reduce latency
-    std::map<pg_shard_t, std::vector<std::pair<int, int>>> *to_read   ///< [out] shards, corresponding subchunks to read
-    ); ///< @return error code, 0 on success
-
   int objects_get_attrs(
     const hobject_t &hoid,
     std::map<std::string, ceph::buffer::list, std::less<>> *out) override;