]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
crimson/os/seastore/lba_manager/btree: move duplicate_for_write
authorSamuel Just <sjust@redhat.com>
Tue, 16 Jun 2020 19:32:39 +0000 (12:32 -0700)
committerSamuel Just <sjust@redhat.com>
Fri, 19 Jun 2020 19:59:26 +0000 (12:59 -0700)
Previously, it appears that I meant for the caller to insert to
invoke duplicate_for_write prior to calling insert().  I think
I chose that convention to avoid chained callbacks within a method
not on *this.  However, there are two scenarios where this is
necessary: leaf and split/merge.  The split/merge criteria are a
little involved and were previously handled incorrectly.

This patch moves responsibility for duplicate_for_write on *this
into insert/update.  Callers should assume that the callee will
handle it, and should not assume that they can call back into the
same node.

Signed-off-by: Samuel Just <sjust@redhat.com>
src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc
src/crimson/os/seastore/lba_manager/btree/lba_btree_node_impl.cc

index 0a5c9d1fa159bf9917e4af8ce2c108ca0121dfe0..9fcb03eda847ff97bd3886aa38089ca64b21eeea 100644 (file)
@@ -205,7 +205,6 @@ BtreeLBAManager::insert_mapping_ret BtreeLBAManager::insert_mapping(
       });
   }
   return split.safe_then([this, &t, laddr, val](LBANodeRef node) {
-    node = cache.duplicate_for_write(t, node)->cast<LBANode>();
     return node->insert(cache, t, laddr, val);
   });
 }
@@ -242,9 +241,6 @@ BtreeLBAManager::update_mapping_ret BtreeLBAManager::update_mapping(
 {
   return get_root(t
   ).safe_then([this, f=std::move(f), &t, addr](LBANodeRef root) mutable {
-    if (root->depth == 0) {
-      root = cache.duplicate_for_write(t, root)->cast<LBANode>();
-    }
     return root->mutate_mapping(
       cache,
       t,
index deb9758fa9b219bd5803e325d53f3019779862ff..8befb830ebb990d65f2253ff68a9ffc31df45238 100644 (file)
@@ -83,10 +83,6 @@ LBAInternalNode::insert_ret LBAInternalNode::insert(
          insert_ertr::make_ready_future<LBANodeRef>(std::move(extent));
       }).safe_then([&cache, &t, laddr, val=std::move(val)](
                     LBANodeRef extent) mutable {
-       if (extent->depth == 0) {
-         auto mut_extent = cache.duplicate_for_write(t, extent);
-         extent = mut_extent->cast<LBANode>();
-       }
        return extent->insert(cache, t, laddr, val);
       });
 }
@@ -105,26 +101,18 @@ LBAInternalNode::mutate_mapping_ret LBAInternalNode::mutate_mapping(
     get_paddr()
   ).safe_then([this, &cache, &t, laddr](LBANodeRef extent) {
     if (extent->at_min_capacity()) {
-      auto mut_this = cache.duplicate_for_write(
-       t, this)->cast<LBAInternalNode>();
-      return mut_this->merge_entry(
+      return merge_entry(
        cache,
        t,
        laddr,
-       mut_this->get_containing_child(laddr),
+       get_containing_child(laddr),
        extent);
     } else {
       return merge_ertr::make_ready_future<LBANodeRef>(
        std::move(extent));
     }
   }).safe_then([&cache, &t, laddr, f=std::move(f)](LBANodeRef extent) mutable {
-    if (extent->depth == 0) {
-      auto mut_extent = cache.duplicate_for_write(
-       t, extent)->cast<LBANode>();
-      return mut_extent->mutate_mapping(cache, t, laddr, std::move(f));
-    } else {
-      return extent->mutate_mapping(cache, t, laddr, std::move(f));
-    }
+    return extent->mutate_mapping(cache, t, laddr, std::move(f));
   });
 }
 
@@ -203,6 +191,12 @@ LBAInternalNode::split_entry(
   Cache &c, Transaction &t, laddr_t addr,
   internal_iterator_t iter, LBANodeRef entry)
 {
+  if (!is_pending()) {
+    auto mut = c.duplicate_for_write(t, this)->cast<LBAInternalNode>();
+    auto mut_iter = mut->iter_idx(iter->get_offset());
+    return mut->split_entry(c, t, addr, mut_iter, entry);
+  }
+
   ceph_assert(!at_max_capacity());
   auto [left, right, pivot] = entry->make_split_children(c, t);
 
@@ -227,6 +221,12 @@ LBAInternalNode::merge_entry(
   Cache &c, Transaction &t, laddr_t addr,
   internal_iterator_t iter, LBANodeRef entry)
 {
+  if (!is_pending()) {
+    auto mut = c.duplicate_for_write(t, this)->cast<LBAInternalNode>();
+    auto mut_iter = mut->iter_idx(iter->get_offset());
+    return mut->merge_entry(c, t, addr, mut_iter, entry);
+  }
+
   logger().debug(
     "LBAInternalNode: merge_entry: {}, {}",
     *this,
@@ -333,12 +333,20 @@ LBALeafNode::lookup_range_ret LBALeafNode::lookup_range(
 
 LBALeafNode::insert_ret LBALeafNode::insert(
   Cache &cache,
-  Transaction &transaction,
+  Transaction &t,
   laddr_t laddr,
   lba_map_val_t val)
 {
   ceph_assert(!at_max_capacity());
 
+  if (!is_pending()) {
+    return cache.duplicate_for_write(t, this)->cast<LBALeafNode>()->insert(
+      cache,
+      t,
+      laddr,
+      val);
+  }
+
   val.paddr = maybe_generate_relative(val.paddr);
   logger().debug(
     "LBALeafNode::insert: inserting {}~{} -> {}",
@@ -368,6 +376,15 @@ LBALeafNode::mutate_mapping_ret LBALeafNode::mutate_mapping(
   laddr_t laddr,
   mutate_func_t &&f)
 {
+  if (!is_pending()) {
+    return cache.duplicate_for_write(transaction, this)->cast<LBALeafNode>(
+    )->mutate_mapping(
+      cache,
+      transaction,
+      laddr,
+      std::move(f));
+  }
+
   ceph_assert(!at_min_capacity());
   auto mutation_pt = find(laddr);
   if (mutation_pt == end()) {