From: Samuel Just Date: Tue, 16 Jun 2020 19:32:39 +0000 (-0700) Subject: crimson/os/seastore/lba_manager/btree: move duplicate_for_write X-Git-Tag: v16.1.0~1882^2~3 X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=31bfa078ac8e92b52157d0908361ff871c96e6f1;p=ceph.git crimson/os/seastore/lba_manager/btree: move duplicate_for_write Previously, it appears that I meant for the caller of insert to invoke duplicate_for_write prior to calling insert(). I think I chose that convention to avoid chained callbacks within a method not on *this. However, there are two scenarios where this is necessary: leaf and split/merge. The split/merge criteria are a little involved and were only buggily considered. This patch moves responsibility for duplicate_for_write on *this into insert/update. Callers should assume that the callee will handle it, and should not assume that they can call back into the same node. Signed-off-by: Samuel Just --- diff --git a/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc b/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc index 0a5c9d1fa159..9fcb03eda847 100644 --- a/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc +++ b/src/crimson/os/seastore/lba_manager/btree/btree_lba_manager.cc @@ -205,7 +205,6 @@ BtreeLBAManager::insert_mapping_ret BtreeLBAManager::insert_mapping( }); } return split.safe_then([this, &t, laddr, val](LBANodeRef node) { - node = cache.duplicate_for_write(t, node)->cast(); return node->insert(cache, t, laddr, val); }); } @@ -242,9 +241,6 @@ BtreeLBAManager::update_mapping_ret BtreeLBAManager::update_mapping( { return get_root(t ).safe_then([this, f=std::move(f), &t, addr](LBANodeRef root) mutable { - if (root->depth == 0) { - root = cache.duplicate_for_write(t, root)->cast(); - } return root->mutate_mapping( cache, t, diff --git a/src/crimson/os/seastore/lba_manager/btree/lba_btree_node_impl.cc b/src/crimson/os/seastore/lba_manager/btree/lba_btree_node_impl.cc
index deb9758fa9b2..8befb830ebb9 100644 --- a/src/crimson/os/seastore/lba_manager/btree/lba_btree_node_impl.cc +++ b/src/crimson/os/seastore/lba_manager/btree/lba_btree_node_impl.cc @@ -83,10 +83,6 @@ LBAInternalNode::insert_ret LBAInternalNode::insert( insert_ertr::make_ready_future(std::move(extent)); }).safe_then([&cache, &t, laddr, val=std::move(val)]( LBANodeRef extent) mutable { - if (extent->depth == 0) { - auto mut_extent = cache.duplicate_for_write(t, extent); - extent = mut_extent->cast(); - } return extent->insert(cache, t, laddr, val); }); } @@ -105,26 +101,18 @@ LBAInternalNode::mutate_mapping_ret LBAInternalNode::mutate_mapping( get_paddr() ).safe_then([this, &cache, &t, laddr](LBANodeRef extent) { if (extent->at_min_capacity()) { - auto mut_this = cache.duplicate_for_write( - t, this)->cast(); - return mut_this->merge_entry( + return merge_entry( cache, t, laddr, - mut_this->get_containing_child(laddr), + get_containing_child(laddr), extent); } else { return merge_ertr::make_ready_future( std::move(extent)); } }).safe_then([&cache, &t, laddr, f=std::move(f)](LBANodeRef extent) mutable { - if (extent->depth == 0) { - auto mut_extent = cache.duplicate_for_write( - t, extent)->cast(); - return mut_extent->mutate_mapping(cache, t, laddr, std::move(f)); - } else { - return extent->mutate_mapping(cache, t, laddr, std::move(f)); - } + return extent->mutate_mapping(cache, t, laddr, std::move(f)); }); } @@ -203,6 +191,12 @@ LBAInternalNode::split_entry( Cache &c, Transaction &t, laddr_t addr, internal_iterator_t iter, LBANodeRef entry) { + if (!is_pending()) { + auto mut = c.duplicate_for_write(t, this)->cast(); + auto mut_iter = mut->iter_idx(iter->get_offset()); + return mut->split_entry(c, t, addr, mut_iter, entry); + } + ceph_assert(!at_max_capacity()); auto [left, right, pivot] = entry->make_split_children(c, t); @@ -227,6 +221,12 @@ LBAInternalNode::merge_entry( Cache &c, Transaction &t, laddr_t addr, internal_iterator_t iter, LBANodeRef entry) { + if 
(!is_pending()) { + auto mut = c.duplicate_for_write(t, this)->cast(); + auto mut_iter = mut->iter_idx(iter->get_offset()); + return mut->merge_entry(c, t, addr, mut_iter, entry); + } + logger().debug( "LBAInternalNode: merge_entry: {}, {}", *this, @@ -333,12 +333,20 @@ LBALeafNode::lookup_range_ret LBALeafNode::lookup_range( LBALeafNode::insert_ret LBALeafNode::insert( Cache &cache, - Transaction &transaction, + Transaction &t, laddr_t laddr, lba_map_val_t val) { ceph_assert(!at_max_capacity()); + if (!is_pending()) { + return cache.duplicate_for_write(t, this)->cast()->insert( + cache, + t, + laddr, + val); + } + val.paddr = maybe_generate_relative(val.paddr); logger().debug( "LBALeafNode::insert: inserting {}~{} -> {}", @@ -368,6 +376,15 @@ LBALeafNode::mutate_mapping_ret LBALeafNode::mutate_mapping( laddr_t laddr, mutate_func_t &&f) { + if (!is_pending()) { + return cache.duplicate_for_write(transaction, this)->cast( + )->mutate_mapping( + cache, + transaction, + laddr, + std::move(f)); + } + ceph_assert(!at_min_capacity()); auto mutation_pt = find(laddr); if (mutation_pt == end()) {