git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
osd: Optimized EC present_shards no longer needed
author     Bill Scales <bill_scales@uk.ibm.com>
           Wed, 30 Jul 2025 11:41:34 +0000 (12:41 +0100)
committer  Alex Ainscow <aainscow@uk.ibm.com>
           Wed, 17 Sep 2025 08:43:26 +0000 (09:43 +0100)
present_shards is no longer needed in the PG log entry; it has been
replaced by code in proc_master_log that calculates which shards were
in the last epoch started and are still present.

Signed-off-by: Bill Scales <bill_scales@uk.ibm.com>
(cherry picked from commit 880a17e39626d99a0b6cc8259523daa83c72802c)
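
The replacement logic in proc_master_log is not part of this diff. Purely as an
illustration of the idea in the message above, the sketch below intersects the
shards that were acting at last_epoch_started with the shards that are currently
present; the shard_id alias and the shards_in_les_and_present helper are made up
for this example and are not Ceph code.

// Illustrative sketch only -- not the actual PeeringState code. It models the
// idea described in the commit message: instead of recording present_shards in
// each pg_log_entry_t, the primary derives the set of shards that were acting
// at last_epoch_started and are still present from the peer info it already has.
#include <iostream>
#include <set>

using shard_id = int;  // stand-in for Ceph's shard_id_t

// Hypothetical helper: shards acting at last_epoch_started, intersected with
// the shards we currently have info from.
std::set<shard_id> shards_in_les_and_present(
    const std::set<shard_id>& acting_at_les,
    const std::set<shard_id>& present_now) {
  std::set<shard_id> out;
  for (shard_id s : acting_at_les) {
    if (present_now.count(s)) {
      out.insert(s);
    }
  }
  return out;
}

int main() {
  // Example: a 2+2 EC pool where shard 3 is currently down.
  std::set<shard_id> acting_at_les = {0, 1, 2, 3};
  std::set<shard_id> present_now   = {0, 1, 2};

  for (shard_id s : shards_in_les_and_present(acting_at_les, present_now)) {
    std::cout << "shard " << s << " was in last_epoch_started and is present\n";
  }
  return 0;
}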

src/osd/ECTransaction.cc
src/osd/ECTransaction.h
src/osd/PGBackend.cc
src/osd/PeeringState.cc
src/osd/osd_types.cc
src/osd/osd_types.h

index 13c2428b94a49e35b8137e40c6fafa74fd44189b..33e4f063ec8950446f1a0ce8ed470b385aeaff84 100644 (file)
@@ -588,7 +588,7 @@ ECTransaction::Generate::Generate(PGTransaction &t,
     shard_written(shard_id_t(0));
   }
 
-  written_and_present_shards();
+  written_shards();
 
   if (!op.attr_updates.empty()) {
     attr_updates();
@@ -855,7 +855,7 @@ void ECTransaction::Generate::appends_and_clone_ranges() {
   }
 }
 
-void ECTransaction::Generate::written_and_present_shards() {
+void ECTransaction::Generate::written_shards() {
   if (entry) {
     if (!rollback_extents.empty()) {
       entry->mod_desc.rollback_extents(
@@ -869,14 +869,6 @@ void ECTransaction::Generate::written_and_present_shards() {
       entry->written_shards.clear();
       written_shards_final = true;
     }
-    // Calculate set of present shards
-    for (auto &&[shard, t]: transactions) {
-      entry->present_shards.insert(shard);
-    }
-    if (entry->present_shards.size() == sinfo.get_k_plus_m()) {
-      // More efficient to encode an empty set for all shards
-      entry->present_shards.clear();
-    }
 
     // Update shard_versions in object_info to record which shards are being
     // written
@@ -932,7 +924,6 @@ void ECTransaction::Generate::written_and_present_shards() {
       }
       ldpp_dout(dpp, 20) << __func__ << " shard_info: oid=" << oid
                          << " version=" << entry->version
-                         << " present=" << entry->present_shards
                          << " written=" << entry->written_shards
                          << " shard_versions=" << oi.shard_versions << dendl;
     }
index bda0fbb725df10d1ab0e95a03397403c5df2aa28..ed946ae888a59b568f1a17ec0acadfcd620d1b67 100644 (file)
@@ -106,7 +106,7 @@ class Generate {
   void truncate();
   void overlay_writes();
   void appends_and_clone_ranges();
-  void written_and_present_shards();
+  void written_shards();
   void attr_updates();
 
  public:
index 3eedd4d462d163b101d37e752ab832c6b08e37f6..6e43dbd9b80d90183b653aa21df13a966d581755 100644 (file)
@@ -423,7 +423,6 @@ void PGBackend::partial_write(
   auto dpp = get_parent()->get_dpp();
   ldpp_dout(dpp, 20) << __func__ << " version=" << entry.version
                     << " written_shards=" << entry.written_shards
-                    << " present_shards=" << entry.present_shards
                     << " pwlc=" << info->partial_writes_last_complete
                     << " previous_version=" << previous_version
                     << dendl;
index 7b2e9b825512a6b407bd4b054ba78bc7beea4bdf..093dc38d882c78ea314e10c24cf94ff82034f160 100644 (file)
@@ -3428,10 +3428,8 @@ void PeeringState::proc_master_log(
       for (auto&& [pg_shard, pi] : all_info) {
        psdout(20) << "version " << p->version
                   << " testing osd " << pg_shard
-                  << " written=" << p->written_shards
-                  << " present=" << p->present_shards << dendl;
-       if (p->is_present_shard(pg_shard.shard) &&
-           p->is_written_shard(pg_shard.shard)) {
+                  << " written=" << p->written_shards << dendl;
+       if (p->is_written_shard(pg_shard.shard)) {
          if (pi.last_update < p->version) {
            if (!shards_with_update.contains(pg_shard.shard)) {
              shards_without_update.insert(pg_shard.shard);
index 0c47e84dc6c2530118c4ac85a1afcb7999cd800c..734dfd003fed33b287224b4723989ce9a1ab7520 100644 (file)
@@ -5007,7 +5007,8 @@ void pg_log_entry_t::encode(ceph::buffer::list &bl) const
     encode(return_code, bl);
   encode(op_returns, bl);
   encode(written_shards, bl);
-  encode(present_shards, bl);
+  shard_id_set unused;
+  encode(unused, bl);
   ENCODE_FINISH(bl);
 }
 
@@ -5081,7 +5082,8 @@ void pg_log_entry_t::decode(ceph::buffer::list::const_iterator &bl)
   }
   if (struct_v >= 15) {
     decode(written_shards, bl);
-    decode(present_shards, bl);
+    shard_id_set unused;
+    decode(unused, bl);
   }
   DECODE_FINISH(bl);
 }
@@ -5133,7 +5135,6 @@ void pg_log_entry_t::dump(Formatter *f) const
     f->close_section();
   }
   f->dump_stream("written_shards") << written_shards;
-  f->dump_stream("present_shards") << present_shards;
   {
     f->open_object_section("mod_desc");
     mod_desc.dump(f);
index 607b030da7d1de759f2b0e187252b1a0b7a51547..58d83be0d315b1ce6c5dc73fc5b8d24f920fde7f 100644 (file)
@@ -4497,7 +4497,6 @@ struct pg_log_entry_t {
   ObjectCleanRegions clean_regions;
 
   shard_id_set written_shards; // EC partial writes do not update every shard
-  shard_id_set present_shards; // EC partial writes need to know set of present shards
 
   pg_log_entry_t()
    : user_version(0), return_code(0), op(0),
@@ -4572,9 +4571,6 @@ struct pg_log_entry_t {
   bool is_written_shard(const shard_id_t shard) const {
     return written_shards.empty() || written_shards.contains(shard);
   }
-  bool is_present_shard(const shard_id_t shard) const {
-    return present_shards.empty() || present_shards.contains(shard);
-  }
 
   void encode_with_checksum(ceph::buffer::list& bl) const;
   void decode_with_checksum(ceph::buffer::list::const_iterator& p);