git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
crimson/osd/ops_executer: fix snap overlap range error 57510/head
author: junxiang Mu <1948535941@qq.com>
Mon, 1 Apr 2024 07:00:14 +0000 (03:00 -0400)
committer: Matan Breizman <mbreizma@redhat.com>
Thu, 16 May 2024 11:49:31 +0000 (14:49 +0300)
Fixes: https://tracker.ceph.com/issues/65113
Signed-off-by: junxiang Mu <1948535941@qq.com>
(cherry picked from commit 7eca779627a90dc80f54957cc49b25b4c965044d)

src/crimson/osd/ops_executer.cc
src/crimson/osd/ops_executer.h
src/crimson/osd/pg_backend.cc
src/test/librados/snapshots_cxx.cc

index b23c23bfca4d192879e0651eaff85c9293c3d929..10020e9adf8a1e32f0c37f9a4c7891f3a2828d63 100644 (file)
@@ -970,20 +970,30 @@ std::unique_ptr<OpsExecuter::CloningContext> OpsExecuter::execute_clone(
   osd_op_params->at_version.version++;
   encode(cloned_snaps, cloning_ctx->log_entry.snaps);
 
-  // update most recent clone_overlap and usage stats
-  assert(cloning_ctx->new_snapset.clones.size() > 0);
-  // In classic, we check for evicted clones before
-  // adjusting the clone_overlap.
-  // This check is redundant here since `clone_obc`
-  // was just created (See prepare_clone()).
-  interval_set<uint64_t> &newest_overlap =
-    cloning_ctx->new_snapset.clone_overlap.rbegin()->second;
-  osd_op_params->modified_ranges.intersection_of(newest_overlap);
-  delta_stats.num_bytes += osd_op_params->modified_ranges.size();
-  newest_overlap.subtract(osd_op_params->modified_ranges);
   return cloning_ctx;
 }
 
+void OpsExecuter::update_clone_overlap() {
+  interval_set<uint64_t> *newest_overlap;
+  if (cloning_ctx) {
+    newest_overlap =
+      &cloning_ctx->new_snapset.clone_overlap.rbegin()->second;
+  } else if (op_info.may_write() 
+    && obc->obs.exists 
+    && !snapc.snaps.empty() 
+    && !obc->ssc->snapset.clones.empty()) {
+    newest_overlap =
+      &obc->ssc->snapset.clone_overlap.rbegin()->second;
+  } else {
+    return;
+  }
+
+  assert(osd_op_params);
+  osd_op_params->modified_ranges.intersection_of(*newest_overlap);
+  newest_overlap->subtract(osd_op_params->modified_ranges);
+  delta_stats.num_bytes += osd_op_params->modified_ranges.size();
+}
+
 void OpsExecuter::CloningContext::apply_to(
   std::vector<pg_log_entry_t>& log_entries,
   ObjectContext& processed_obc) &&
@@ -1002,6 +1012,7 @@ OpsExecuter::flush_clone_metadata(
 {
   assert(!txn.empty());
   auto maybe_snap_mapped = interruptor::now();
+  update_clone_overlap();
   if (cloning_ctx) {
     std::move(*cloning_ctx).apply_to(log_entries, *obc);
     const auto& coid = log_entries.front().soid;
index 6bcbe5580208ecf3c9b950eaee5050af98916a5a..60b5c20aecddac967d11406e4ccdeca99022c36f 100644 (file)
@@ -255,6 +255,14 @@ private:
       && snapc.snaps[0] > initial_obc.ssc->snapset.seq; // existing obj is old
   }
 
+  /**
+  * update_clone_overlap
+  *
+  * We need to update the most recent snapshot and the overlapping
+  * part of the head object for each write operation.
+  */
+  void update_clone_overlap();
+
   interruptible_future<std::vector<pg_log_entry_t>> flush_clone_metadata(
     std::vector<pg_log_entry_t>&& log_entries,
     SnapMapper& snap_mapper,
index ab60cfd12730837037730c8ad34ff26bd1fb4ce3..143f522bb37510ae4adf3914522b78be35d6e480 100644 (file)
@@ -596,11 +596,11 @@ void PGBackend::update_size_and_usage(object_stat_sum_t& delta_stats,
   if (write_full) {
     if (oi.size) {
       ch.insert(0, oi.size);
-    } else if (length) {
-      ch.insert(offset, length);
     }
-    modified.union_of(ch);
+  } else if (length) {
+    ch.insert(offset, length);
   }
+  modified.union_of(ch);
   if (write_full ||
       (offset + length > oi.size && length)) {
     uint64_t new_size = offset + length;
index 95dbe5da0125aac407b964ee97afaf0332c764c7..3338b62056ea9c05909372fc64b4d4c8d1b3c47f 100644 (file)
@@ -238,8 +238,6 @@ TEST_F(LibRadosSnapshotsSelfManagedPP, RollbackPP) {
 }
 
 TEST_F(LibRadosSnapshotsSelfManagedPP, SnapOverlapPP) {
-  // WIP https://tracker.ceph.com/issues/58263
-  SKIP_IF_CRIMSON();
   std::vector<uint64_t> my_snaps;
   IoCtx readioctx;
   ASSERT_EQ(0, cluster.ioctx_create(pool_name.c_str(), readioctx));