From: Xuehan Xu
Date: Tue, 8 Feb 2022 05:34:02 +0000 (+0800)
Subject: crimson/os/seastore: extract lba nodes out of lba manager
X-Git-Tag: v18.0.0~1254^2~1
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=917f45ab89954df9a05b7dd8bc7ee9b4604adea0;p=ceph.git

crimson/os/seastore: extract lba nodes out of lba manager

Change lba nodes to general fixed-size-kv btree nodes

Signed-off-by: Xuehan Xu
---

diff --git a/src/crimson/os/seastore/btree/btree_range_pin.h b/src/crimson/os/seastore/btree/btree_range_pin.h
index 4791a9b457f..24042946739 100644
--- a/src/crimson/os/seastore/btree/btree_range_pin.h
+++ b/src/crimson/os/seastore/btree/btree_range_pin.h
@@ -60,6 +60,34 @@ inline std::ostream &operator<<(
     << ", depth=" << rhs.depth
     << ")";
 }
+
+/**
+ * fixed_kv_node_meta_le_t
+ *
+ * On disk layout for fixed_kv_node_meta_t
+ */
+template <typename bound_le_t>
+struct fixed_kv_node_meta_le_t {
+  bound_le_t begin = bound_le_t(0);
+  bound_le_t end = bound_le_t(0);
+  depth_le_t depth = init_depth_le(0);
+
+  fixed_kv_node_meta_le_t() = default;
+  fixed_kv_node_meta_le_t(
+    const fixed_kv_node_meta_le_t<bound_le_t> &) = default;
+  explicit fixed_kv_node_meta_le_t(
+    const fixed_kv_node_meta_t<typename bound_le_t::orig_type> &val)
+    : begin(ceph_le64(val.begin)),
+      end(ceph_le64(val.end)),
+      depth(init_depth_le(val.depth)) {}
+
+  operator fixed_kv_node_meta_t<typename bound_le_t::orig_type>() const {
+    return fixed_kv_node_meta_t<typename bound_le_t::orig_type>{
+      begin, end, depth };
+  }
+};
+
+
 /**
  * btree_range_pin_t
  *
diff --git a/src/crimson/os/seastore/btree/fixed_kv_node.h b/src/crimson/os/seastore/btree/fixed_kv_node.h
new file mode 100644
index 00000000000..c1cde15c90a
--- /dev/null
+++ b/src/crimson/os/seastore/btree/fixed_kv_node.h
@@ -0,0 +1,448 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#include
+#include
+#include
+
+
+#include "include/buffer.h"
+
+#include "crimson/common/fixed_kv_node_layout.h"
+#include "crimson/common/errorator.h"
+#include "crimson/os/seastore/lba_manager.h"
+#include "crimson/os/seastore/seastore_types.h"
+#include "crimson/os/seastore/cache.h"
+#include "crimson/os/seastore/cached_extent.h"
+
+#include "crimson/os/seastore/btree/btree_range_pin.h"
+#include "crimson/os/seastore/btree/fixed_kv_btree.h"
+
+namespace crimson::os::seastore {
+
+/**
+ * FixedKVNode
+ *
+ * Base class enabling recursive lookup between internal and leaf nodes.
+ */
+template <typename node_key_t>
+struct FixedKVNode : CachedExtent {
+  using FixedKVNodeRef = TCachedExtentRef<FixedKVNode>;
+
+  btree_range_pin_t<node_key_t> pin;
+
+  FixedKVNode(ceph::bufferptr &&ptr) : CachedExtent(std::move(ptr)), pin(this) {}
+  FixedKVNode(const FixedKVNode &rhs)
+    : CachedExtent(rhs), pin(rhs.pin, this) {}
+
+  virtual fixed_kv_node_meta_t<node_key_t> get_node_meta() const = 0;
+
+  virtual ~FixedKVNode() = default;
+
+  void on_delta_write(paddr_t record_block_offset) final {
+    // All in-memory relative addrs are necessarily record-relative
+    assert(get_prior_instance());
+    pin.take_pin(get_prior_instance()->template cast<FixedKVNode>()->pin);
+    resolve_relative_addrs(record_block_offset);
+  }
+
+  void on_initial_write() final {
+    // All in-memory relative addrs are necessarily block-relative
+    resolve_relative_addrs(get_paddr());
+  }
+
+  void on_clean_read() final {
+    // From initial write of block, relative addrs are necessarily block-relative
+    resolve_relative_addrs(get_paddr());
+  }
+
+  virtual void resolve_relative_addrs(paddr_t base) = 0;
+};
+
+/**
+ * FixedKVInternalNode
+ *
+ * Abstracts operations on and layout of internal nodes for the
+ * LBA Tree.
+ */
+template <
+  size_t CAPACITY,
+  typename NODE_KEY,
+  typename NODE_KEY_LE,
+  size_t node_size,
+  typename node_type_t>
+struct FixedKVInternalNode
+  : FixedKVNode<NODE_KEY>,
+    common::FixedKVNodeLayout<
+      CAPACITY,
+      fixed_kv_node_meta_t<NODE_KEY>,
+      fixed_kv_node_meta_le_t<NODE_KEY_LE>,
+      NODE_KEY, NODE_KEY_LE,
+      paddr_t, paddr_le_t> {
+  using Ref = TCachedExtentRef<node_type_t>;
+  using node_layout_t =
+    common::FixedKVNodeLayout<
+      CAPACITY,
+      fixed_kv_node_meta_t<NODE_KEY>,
+      fixed_kv_node_meta_le_t<NODE_KEY_LE>,
+      NODE_KEY,
+      NODE_KEY_LE,
+      paddr_t,
+      paddr_le_t>;
+  using internal_const_iterator_t = typename node_layout_t::const_iterator;
+  using internal_iterator_t = typename node_layout_t::iterator;
+  template <typename... T>
+  FixedKVInternalNode(T&&... t) :
+    FixedKVNode<NODE_KEY>(std::forward<T>(t)...),
+    node_layout_t(this->get_bptr().c_str()) {}
+
+  virtual ~FixedKVInternalNode() {}
+
+  fixed_kv_node_meta_t<NODE_KEY> get_node_meta() const {
+    return this->get_meta();
+  }
+
+  typename node_layout_t::delta_buffer_t delta_buffer;
+  typename node_layout_t::delta_buffer_t *maybe_get_delta_buffer() {
+    return this->is_mutation_pending()
+      ? &delta_buffer : nullptr;
+  }
+
+  CachedExtentRef duplicate_for_write() override {
+    assert(delta_buffer.empty());
+    return CachedExtentRef(new node_type_t(*this));
+  };
+
+  void update(
+    internal_const_iterator_t iter,
+    paddr_t addr) {
+    return this->journal_update(
+      iter,
+      this->maybe_generate_relative(addr),
+      maybe_get_delta_buffer());
+  }
+
+  void insert(
+    internal_const_iterator_t iter,
+    NODE_KEY pivot,
+    paddr_t addr) {
+    return this->journal_insert(
+      iter,
+      pivot,
+      this->maybe_generate_relative(addr),
+      maybe_get_delta_buffer());
+  }
+
+  void remove(internal_const_iterator_t iter) {
+    return this->journal_remove(
+      iter,
+      maybe_get_delta_buffer());
+  }
+
+  void replace(
+    internal_const_iterator_t iter,
+    NODE_KEY pivot,
+    paddr_t addr) {
+    return this->journal_replace(
+      iter,
+      pivot,
+      this->maybe_generate_relative(addr),
+      maybe_get_delta_buffer());
+  }
+
+  std::tuple<Ref, Ref, NODE_KEY>
+  make_split_children(op_context_t c) {
+    auto left = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    auto right = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    auto pivot = this->split_into(*left, *right);
+    left->pin.set_range(left->get_meta());
+    right->pin.set_range(right->get_meta());
+    return std::make_tuple(
+      left,
+      right,
+      pivot);
+  }
+
+  Ref make_full_merge(
+    op_context_t c,
+    Ref &right) {
+    auto replacement = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    replacement->merge_from(*this, *right->template cast<node_type_t>());
+    replacement->pin.set_range(replacement->get_meta());
+    return replacement;
+  }
+
+  std::tuple<Ref, Ref, NODE_KEY>
+  make_balanced(
+    op_context_t c,
+    Ref &_right,
+    bool prefer_left) {
+    ceph_assert(_right->get_type() == this->get_type());
+    auto &right = *_right->template cast<node_type_t>();
+    auto replacement_left = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    auto replacement_right = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+
+    auto pivot = this->balance_into_new_nodes(
+      *this,
+      right,
+      prefer_left,
+      *replacement_left,
+      *replacement_right);
+
+    replacement_left->pin.set_range(replacement_left->get_meta());
+    replacement_right->pin.set_range(replacement_right->get_meta());
+    return std::make_tuple(
+      replacement_left,
+      replacement_right,
+      pivot);
+  }
+
+  /**
+   * Internal relative addresses on read or in memory prior to commit
+   * are either record or block relative depending on whether this
+   * physical node is is_initial_pending() or just is_pending().
+   *
+   * User passes appropriate base depending on lifecycle and
+   * resolve_relative_addrs fixes up relative internal references
+   * based on base.
+   */
+  void resolve_relative_addrs(paddr_t base)
+  {
+    LOG_PREFIX(FixedKVInternalNode::resolve_relative_addrs);
+    for (auto i: *this) {
+      if (i->get_val().is_relative()) {
+        auto updated = base.add_relative(i->get_val());
+        SUBTRACE(seastore_lba_details, "{} -> {}", i->get_val(), updated);
+        i->set_val(updated);
+      }
+    }
+  }
+
+  void node_resolve_vals(
+    internal_iterator_t from,
+    internal_iterator_t to) const {
+    if (this->is_initial_pending()) {
+      for (auto i = from; i != to; ++i) {
+        if (i->get_val().is_relative()) {
+          assert(i->get_val().is_block_relative());
+          i->set_val(this->get_paddr().add_relative(i->get_val()));
+        }
+      }
+    }
+  }
+  void node_unresolve_vals(
+    internal_iterator_t from,
+    internal_iterator_t to) const {
+    if (this->is_initial_pending()) {
+      for (auto i = from; i != to; ++i) {
+        if (i->get_val().is_relative()) {
+          assert(i->get_val().is_record_relative());
+          i->set_val(i->get_val() - this->get_paddr());
+        }
+      }
+    }
+  }
+
+  std::ostream &print_detail(std::ostream &out) const
+  {
+    return out << ", size=" << this->get_size()
+               << ", meta=" << this->get_meta();
+  }
+
+  ceph::bufferlist get_delta() {
+    ceph::buffer::ptr bptr(delta_buffer.get_bytes());
+    delta_buffer.copy_out(bptr.c_str(), bptr.length());
+    ceph::bufferlist bl;
+    bl.push_back(bptr);
+    return bl;
+  }
+
+  void apply_delta_and_adjust_crc(
+    paddr_t base, const ceph::bufferlist &_bl) {
+    assert(_bl.length());
+    ceph::bufferlist bl = _bl;
+    bl.rebuild();
+    typename node_layout_t::delta_buffer_t buffer;
+    buffer.copy_in(bl.front().c_str(), bl.front().length());
+    buffer.replay(*this);
+    this->set_last_committed_crc(this->get_crc32c());
+    resolve_relative_addrs(base);
+  }
+
+  constexpr static size_t get_min_capacity() {
+    return (node_layout_t::get_capacity() - 1) / 2;
+  }
+
+  bool at_max_capacity() const {
+    assert(this->get_size() <= node_layout_t::get_capacity());
+    return this->get_size() == node_layout_t::get_capacity();
+  }
+
+  bool at_min_capacity() const {
+    assert(this->get_size() >= (get_min_capacity() - 1));
+    return this->get_size() <= get_min_capacity();
+  }
+
+  bool below_min_capacity() const {
+    assert(this->get_size() >= (get_min_capacity() - 1));
+    return this->get_size() < get_min_capacity();
+  }
+};
+
+template <
+  size_t CAPACITY,
+  typename NODE_KEY,
+  typename NODE_KEY_LE,
+  typename VAL,
+  typename VAL_LE,
+  size_t node_size,
+  typename node_type_t>
+struct FixedKVLeafNode
+  : FixedKVNode<NODE_KEY>,
+    common::FixedKVNodeLayout<
+      CAPACITY,
+      fixed_kv_node_meta_t<NODE_KEY>,
+      fixed_kv_node_meta_le_t<NODE_KEY_LE>,
+      NODE_KEY, NODE_KEY_LE,
+      VAL, VAL_LE> {
+  using Ref = TCachedExtentRef<node_type_t>;
+  using node_layout_t =
+    common::FixedKVNodeLayout<
+      CAPACITY,
+      fixed_kv_node_meta_t<NODE_KEY>,
+      fixed_kv_node_meta_le_t<NODE_KEY_LE>,
+      NODE_KEY,
+      NODE_KEY_LE,
+      VAL,
+      VAL_LE>;
+  using internal_const_iterator_t = typename node_layout_t::const_iterator;
+  template <typename... T>
+  FixedKVLeafNode(T&&... t) :
+    FixedKVNode<NODE_KEY>(std::forward<T>(t)...),
+    node_layout_t(this->get_bptr().c_str()) {}
+
+  virtual ~FixedKVLeafNode() {}
+
+  fixed_kv_node_meta_t<NODE_KEY> get_node_meta() const {
+    return this->get_meta();
+  }
+
+  typename node_layout_t::delta_buffer_t delta_buffer;
+  virtual typename node_layout_t::delta_buffer_t *maybe_get_delta_buffer() {
+    return this->is_mutation_pending() ? &delta_buffer : nullptr;
+  }
+
+  CachedExtentRef duplicate_for_write() override {
+    assert(delta_buffer.empty());
+    return CachedExtentRef(new node_type_t(*this));
+  };
+
+  virtual void update(
+    internal_const_iterator_t iter,
+    VAL val) = 0;
+  virtual internal_const_iterator_t insert(
+    internal_const_iterator_t iter,
+    NODE_KEY addr,
+    VAL val) = 0;
+  virtual void remove(internal_const_iterator_t iter) = 0;
+
+  std::tuple<Ref, Ref, NODE_KEY>
+  make_split_children(op_context_t c) {
+    auto left = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    auto right = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    auto pivot = this->split_into(*left, *right);
+    left->pin.set_range(left->get_meta());
+    right->pin.set_range(right->get_meta());
+    return std::make_tuple(
+      left,
+      right,
+      pivot);
+  }
+
+  Ref make_full_merge(
+    op_context_t c,
+    Ref &right) {
+    auto replacement = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    replacement->merge_from(*this, *right->template cast<node_type_t>());
+    replacement->pin.set_range(replacement->get_meta());
+    return replacement;
+  }
+
+  std::tuple<Ref, Ref, NODE_KEY>
+  make_balanced(
+    op_context_t c,
+    Ref &_right,
+    bool prefer_left) {
+    ceph_assert(_right->get_type() == this->get_type());
+    auto &right = *_right->template cast<node_type_t>();
+    auto replacement_left = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    auto replacement_right = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+
+    auto pivot = this->balance_into_new_nodes(
+      *this,
+      right,
+      prefer_left,
+      *replacement_left,
+      *replacement_right);
+
+    replacement_left->pin.set_range(replacement_left->get_meta());
+    replacement_right->pin.set_range(replacement_right->get_meta());
+    return std::make_tuple(
+      replacement_left,
+      replacement_right,
+      pivot);
+  }
+
+  ceph::bufferlist get_delta() {
+    ceph::buffer::ptr bptr(delta_buffer.get_bytes());
+    delta_buffer.copy_out(bptr.c_str(), bptr.length());
+    ceph::bufferlist bl;
+    bl.push_back(bptr);
+    return bl;
+  }
+
+  void apply_delta_and_adjust_crc(
+    paddr_t base, const ceph::bufferlist &_bl) {
+    assert(_bl.length());
+    ceph::bufferlist bl = _bl;
+    bl.rebuild();
+    typename node_layout_t::delta_buffer_t buffer;
+    buffer.copy_in(bl.front().c_str(), bl.front().length());
+    buffer.replay(*this);
+    this->set_last_committed_crc(this->get_crc32c());
+    this->resolve_relative_addrs(base);
+  }
+
+  constexpr static size_t get_min_capacity() {
+    return (node_layout_t::get_capacity() - 1) / 2;
+  }
+
+  bool at_max_capacity() const {
+    assert(this->get_size() <= node_layout_t::get_capacity());
+    return this->get_size() == node_layout_t::get_capacity();
+  }
+
+  bool at_min_capacity() const {
+    assert(this->get_size() >= (get_min_capacity() - 1));
+    return this->get_size() <= get_min_capacity();
+  }
+
+  bool below_min_capacity() const {
+    assert(this->get_size() >= (get_min_capacity() - 1));
+    return this->get_size() < get_min_capacity();
+  }
+};
+
+} // namespace crimson::os::seastore
diff --git a/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.cc b/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.cc
index a6ee25e9170..e3e69421fc6 100644
--- a/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.cc
+++ b/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.cc
@@ -27,24 +27,6 @@ std::ostream& operator<<(std::ostream& out, const lba_map_val_t& v)
     << ")";
 }
 
-std::ostream &LBAInternalNode::print_detail(std::ostream &out) const
-{
-  return out << ", size=" << get_size()
-             << ", meta=" << get_meta();
-}
-
-void LBAInternalNode::resolve_relative_addrs(paddr_t base)
-{
-  LOG_PREFIX(LBAInternalNode::resolve_relative_addrs);
-  for (auto i: *this) {
-    if (i->get_val().is_relative()) {
-      auto updated = base.add_relative(i->get_val());
-      TRACE("{} -> {}", i->get_val(), updated);
-      i->set_val(updated);
-    }
-  }
-}
-
 std::ostream &LBALeafNode::print_detail(std::ostream &out) const
 {
   return out << ", size=" << get_size()
diff --git a/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.h b/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.h
index 8b2530e7c91..004a5778001 100644
--- a/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.h
+++ b/src/crimson/os/seastore/lba_manager/btree/lba_btree_node.h
@@ -19,10 +19,12 @@
 #include "crimson/os/seastore/btree/btree_range_pin.h"
 #include "crimson/os/seastore/btree/fixed_kv_btree.h"
+#include "crimson/os/seastore/btree/fixed_kv_node.h"
 
 namespace crimson::os::seastore::lba_manager::btree {
 
 using base_iertr = LBAManager::base_iertr;
+using LBANode = FixedKVNode<laddr_t>;
 
 /**
  * lba_map_val_t
@@ -54,66 +56,9 @@ std::ostream& operator<<(std::ostream& out, const lba_map_val_t&);
 
 constexpr size_t LBA_BLOCK_SIZE = 4096;
 
-/**
- * lba_node_meta_le_t
- *
- * On disk layout for fixed_kv_node_meta_t
- */
-struct lba_node_meta_le_t {
-  laddr_le_t begin = laddr_le_t(0);
-  laddr_le_t end = laddr_le_t(0);
-  depth_le_t depth = init_depth_le(0);
-
-  lba_node_meta_le_t() = default;
-  lba_node_meta_le_t(const lba_node_meta_le_t &) = default;
-  explicit lba_node_meta_le_t(const fixed_kv_node_meta_t<laddr_t> &val)
-    : begin(ceph_le64(val.begin)),
-      end(ceph_le64(val.end)),
-      depth(init_depth_le(val.depth)) {}
-
-  operator fixed_kv_node_meta_t<laddr_t>() const {
-    return fixed_kv_node_meta_t<laddr_t>{ begin, end, depth };
-  }
-};
-
-/**
- * LBANode
- *
- * Base class enabling recursive lookup between internal and leaf nodes.
- */
-struct LBANode : CachedExtent {
-  using LBANodeRef = TCachedExtentRef<LBANode>;
-
-  btree_range_pin_t<laddr_t> pin;
-
-  LBANode(ceph::bufferptr &&ptr) : CachedExtent(std::move(ptr)), pin(this) {}
-  LBANode(const LBANode &rhs)
-    : CachedExtent(rhs), pin(rhs.pin, this) {}
-
-  virtual fixed_kv_node_meta_t<laddr_t> get_node_meta() const = 0;
+using lba_node_meta_t = fixed_kv_node_meta_t<laddr_t>;
 
-  virtual ~LBANode() = default;
-
-  void on_delta_write(paddr_t record_block_offset) final {
-    // All in-memory relative addrs are necessarily record-relative
-    assert(get_prior_instance());
-    pin.take_pin(get_prior_instance()->cast<LBANode>()->pin);
-    resolve_relative_addrs(record_block_offset);
-  }
-
-  void on_initial_write() final {
-    // All in-memory relative addrs are necessarily block-relative
-    resolve_relative_addrs(get_paddr());
-  }
-
-  void on_clean_read() final {
-    // From initial write of block, relative addrs are necessarily block-relative
-    resolve_relative_addrs(get_paddr());
-  }
-
-  virtual void resolve_relative_addrs(paddr_t base) = 0;
-};
-using LBANodeRef = LBANode::LBANodeRef;
+using lba_node_meta_le_t = fixed_kv_node_meta_le_t<laddr_le_t>;
 
 /**
  * LBAInternalNode
@@ -134,197 +79,22 @@ using LBANodeRef = LBANode::LBANodeRef;
  */
 constexpr size_t INTERNAL_NODE_CAPACITY = 254;
 struct LBAInternalNode
-  : LBANode,
-    common::FixedKVNodeLayout<
+  : FixedKVInternalNode<
       INTERNAL_NODE_CAPACITY,
-      fixed_kv_node_meta_t<laddr_t>, lba_node_meta_le_t,
       laddr_t, laddr_le_t,
-      paddr_t, paddr_le_t> {
+      LBA_BLOCK_SIZE,
+      LBAInternalNode> {
   using Ref = TCachedExtentRef<LBAInternalNode>;
   using internal_iterator_t = const_iterator;
   template <typename... T>
   LBAInternalNode(T&&... t) :
-    LBANode(std::forward<T>(t)...),
-    FixedKVNodeLayout(get_bptr().c_str()) {}
+    FixedKVInternalNode(std::forward<T>(t)...) {}
 
   static constexpr extent_types_t TYPE = extent_types_t::LADDR_INTERNAL;
 
-  fixed_kv_node_meta_t<laddr_t> get_node_meta() const { return get_meta(); }
-
-  CachedExtentRef duplicate_for_write() final {
-    assert(delta_buffer.empty());
-    return CachedExtentRef(new LBAInternalNode(*this));
-  };
-
-  delta_buffer_t delta_buffer;
-  delta_buffer_t *maybe_get_delta_buffer() {
-    return is_mutation_pending() ? &delta_buffer : nullptr;
-  }
-
-  void update(
-    const_iterator iter,
-    paddr_t addr) {
-    return journal_update(
-      iter,
-      maybe_generate_relative(addr),
-      maybe_get_delta_buffer());
-  }
-
-  void insert(
-    const_iterator iter,
-    laddr_t pivot,
-    paddr_t addr) {
-    return journal_insert(
-      iter,
-      pivot,
-      maybe_generate_relative(addr),
-      maybe_get_delta_buffer());
-  }
-
-  void remove(const_iterator iter) {
-    return journal_remove(
-      iter,
-      maybe_get_delta_buffer());
-  }
-
-  void replace(
-    const_iterator iter,
-    laddr_t pivot,
-    paddr_t addr) {
-    return journal_replace(
-      iter,
-      pivot,
-      maybe_generate_relative(addr),
-      maybe_get_delta_buffer());
-  }
-
-  std::tuple<Ref, Ref, laddr_t>
-  make_split_children(op_context_t c) {
-    auto left = c.cache.alloc_new_extent<LBAInternalNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    auto right = c.cache.alloc_new_extent<LBAInternalNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    auto pivot = split_into(*left, *right);
-    left->pin.set_range(left->get_meta());
-    right->pin.set_range(right->get_meta());
-    return std::make_tuple(
-      left,
-      right,
-      pivot);
-  }
-
-  Ref make_full_merge(
-    op_context_t c,
-    Ref &right) {
-    auto replacement = c.cache.alloc_new_extent<LBAInternalNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    replacement->merge_from(*this, *right->cast<LBAInternalNode>());
-    replacement->pin.set_range(replacement->get_meta());
-    return replacement;
-  }
-
-  std::tuple<Ref, Ref, laddr_t>
-  make_balanced(
-    op_context_t c,
-    Ref &_right,
-    bool prefer_left) {
-    ceph_assert(_right->get_type() == get_type());
-    auto &right = *_right->cast<LBAInternalNode>();
-    auto replacement_left = c.cache.alloc_new_extent<LBAInternalNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    auto replacement_right = c.cache.alloc_new_extent<LBAInternalNode>(
-      c.trans, LBA_BLOCK_SIZE);
-
-    auto pivot = balance_into_new_nodes(
-      *this,
-      right,
-      prefer_left,
-      *replacement_left,
-      *replacement_right);
-
-    replacement_left->pin.set_range(replacement_left->get_meta());
-    replacement_right->pin.set_range(replacement_right->get_meta());
-    return std::make_tuple(
-      replacement_left,
-      replacement_right,
-      pivot);
-  }
-
-  /**
-   * Internal relative addresses on read or in memory prior to commit
-   * are either record or block relative depending on whether this
-   * physical node is is_initial_pending() or just is_pending().
-   *
-   * User passes appropriate base depending on lifecycle and
-   * resolve_relative_addrs fixes up relative internal references
-   * based on base.
-   */
-  void resolve_relative_addrs(paddr_t base);
-  void node_resolve_vals(iterator from, iterator to) const final {
-    if (is_initial_pending()) {
-      for (auto i = from; i != to; ++i) {
-        if (i->get_val().is_relative()) {
-          assert(i->get_val().is_block_relative());
-          i->set_val(get_paddr().add_relative(i->get_val()));
-        }
-      }
-    }
-  }
-  void node_unresolve_vals(iterator from, iterator to) const final {
-    if (is_initial_pending()) {
-      for (auto i = from; i != to; ++i) {
-        if (i->get_val().is_relative()) {
-          assert(i->get_val().is_record_relative());
-          i->set_val(i->get_val() - get_paddr());
-        }
-      }
-    }
-  }
   extent_types_t get_type() const final {
     return TYPE;
   }
-
-  std::ostream &print_detail(std::ostream &out) const final;
-
-  ceph::bufferlist get_delta() final {
-    ceph::buffer::ptr bptr(delta_buffer.get_bytes());
-    delta_buffer.copy_out(bptr.c_str(), bptr.length());
-    ceph::bufferlist bl;
-    bl.push_back(bptr);
-    return bl;
-  }
-
-  void apply_delta_and_adjust_crc(
-    paddr_t base, const ceph::bufferlist &_bl) final {
-    assert(_bl.length());
-    ceph::bufferlist bl = _bl;
-    bl.rebuild();
-    delta_buffer_t buffer;
-    buffer.copy_in(bl.front().c_str(), bl.front().length());
-    buffer.replay(*this);
-    set_last_committed_crc(get_crc32c());
-    resolve_relative_addrs(base);
-  }
-
-  constexpr static size_t get_min_capacity() {
-    return (get_capacity() - 1) / 2;
-  }
-
-  bool at_max_capacity() const {
-    assert(get_size() <= get_capacity());
-    return get_size() == get_capacity();
-  }
-
-  bool at_min_capacity() const {
-    assert(get_size() >= (get_min_capacity() - 1));
-    return get_size() <= get_min_capacity();
-  }
-
-  bool below_min_capacity() const {
-    assert(get_size() >= (get_min_capacity() - 1));
-    return get_size() < get_min_capacity();
-  }
 };
 using LBAInternalNodeRef = LBAInternalNode::Ref;
 
@@ -372,36 +142,23 @@ struct lba_map_val_le_t {
 };
 
 struct LBALeafNode
-  : LBANode,
-    common::FixedKVNodeLayout<
+  : FixedKVLeafNode<
       LEAF_NODE_CAPACITY,
-      fixed_kv_node_meta_t<laddr_t>, lba_node_meta_le_t,
       laddr_t, laddr_le_t,
-      lba_map_val_t, lba_map_val_le_t> {
+      lba_map_val_t, lba_map_val_le_t,
+      LBA_BLOCK_SIZE,
+      LBALeafNode> {
   using Ref = TCachedExtentRef<LBALeafNode>;
   using internal_iterator_t = const_iterator;
   template <typename... T>
   LBALeafNode(T&&... t) :
-    LBANode(std::forward<T>(t)...),
-    FixedKVNodeLayout(get_bptr().c_str()) {}
+    FixedKVLeafNode(std::forward<T>(t)...) {}
 
   static constexpr extent_types_t TYPE = extent_types_t::LADDR_LEAF;
 
-  fixed_kv_node_meta_t<laddr_t> get_node_meta() const { return get_meta(); }
-
-  CachedExtentRef duplicate_for_write() final {
-    assert(delta_buffer.empty());
-    return CachedExtentRef(new LBALeafNode(*this));
-  };
-
-  delta_buffer_t delta_buffer;
-  delta_buffer_t *maybe_get_delta_buffer() {
-    return is_mutation_pending() ? &delta_buffer : nullptr;
-  }
-
   void update(
     const_iterator iter,
-    lba_map_val_t val) {
+    lba_map_val_t val) final {
     val.paddr = maybe_generate_relative(val.paddr);
     return journal_update(
       iter,
@@ -409,10 +166,10 @@ struct LBALeafNode
       maybe_get_delta_buffer());
   }
 
-  auto insert(
+  const_iterator insert(
     const_iterator iter,
     laddr_t addr,
-    lba_map_val_t val) {
+    lba_map_val_t val) final {
     val.paddr = maybe_generate_relative(val.paddr);
     journal_insert(
       iter,
@@ -422,65 +179,12 @@ struct LBALeafNode
     return iter;
   }
 
-  void remove(const_iterator iter) {
+  void remove(const_iterator iter) final {
     return journal_remove(
       iter,
       maybe_get_delta_buffer());
   }
-
-  std::tuple<Ref, Ref, laddr_t>
-  make_split_children(op_context_t c) {
-    auto left = c.cache.alloc_new_extent<LBALeafNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    auto right = c.cache.alloc_new_extent<LBALeafNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    auto pivot = split_into(*left, *right);
-    left->pin.set_range(left->get_meta());
-    right->pin.set_range(right->get_meta());
-    return std::make_tuple(
-      left,
-      right,
-      pivot);
-  }
-
-  Ref make_full_merge(
-    op_context_t c,
-    Ref &right) {
-    auto replacement = c.cache.alloc_new_extent<LBALeafNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    replacement->merge_from(*this, *right->cast<LBALeafNode>());
-    replacement->pin.set_range(replacement->get_meta());
-    return replacement;
-  }
-
-  std::tuple<Ref, Ref, laddr_t>
-  make_balanced(
-    op_context_t c,
-    Ref &_right,
-    bool prefer_left) {
-    ceph_assert(_right->get_type() == get_type());
-    auto &right = *_right->cast<LBALeafNode>();
-    auto replacement_left = c.cache.alloc_new_extent<LBALeafNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    auto replacement_right = c.cache.alloc_new_extent<LBALeafNode>(
-      c.trans, LBA_BLOCK_SIZE);
-
-    auto pivot = balance_into_new_nodes(
-      *this,
-      right,
-      prefer_left,
-      *replacement_left,
-      *replacement_right);
-
-    replacement_left->pin.set_range(replacement_left->get_meta());
-    replacement_right->pin.set_range(replacement_right->get_meta());
-    return std::make_tuple(
-      replacement_left,
-      replacement_right,
-      pivot);
-  }
-
   // See LBAInternalNode, same concept
   void resolve_relative_addrs(paddr_t base);
   void node_resolve_vals(iterator from, iterator to) const final {
@@ -509,50 +213,11 @@ struct LBALeafNode
     }
   }
 
-  ceph::bufferlist get_delta() final {
-    ceph::buffer::ptr bptr(delta_buffer.get_bytes());
-    delta_buffer.copy_out(bptr.c_str(), bptr.length());
-    ceph::bufferlist bl;
-    bl.push_back(bptr);
-    return bl;
-  }
-
-  void apply_delta_and_adjust_crc(
-    paddr_t base, const ceph::bufferlist &_bl) final {
-    assert(_bl.length());
-    ceph::bufferlist bl = _bl;
-    bl.rebuild();
-    delta_buffer_t buffer;
-    buffer.copy_in(bl.front().c_str(), bl.front().length());
-    buffer.replay(*this);
-    set_last_committed_crc(get_crc32c());
-    resolve_relative_addrs(base);
-  }
-
   extent_types_t get_type() const final {
     return TYPE;
   }
 
   std::ostream &print_detail(std::ostream &out) const final;
-
-  constexpr static size_t get_min_capacity() {
-    return (get_capacity() - 1) / 2;
-  }
-
-  bool at_max_capacity() const {
-    assert(get_size() <= get_capacity());
-    return get_size() == get_capacity();
-  }
-
-  bool at_min_capacity() const {
-    assert(get_size() >= (get_min_capacity() - 1));
-    return get_size() <= get_min_capacity();
-  }
-
-  bool below_min_capacity() const {
-    assert(get_size() >= (get_min_capacity() - 1));
-    return get_size() < get_min_capacity();
-  }
 };
 using LBALeafNodeRef = TCachedExtentRef<LBALeafNode>;
 
diff --git a/src/crimson/os/seastore/seastore_types.h b/src/crimson/os/seastore/seastore_types.h
index 646f78b76af..b875099e88d 100644
--- a/src/crimson/os/seastore/seastore_types.h
+++ b/src/crimson/os/seastore/seastore_types.h
@@ -712,6 +712,8 @@ struct __attribute((packed)) paddr_le_t {
 
   ceph_le64 dev_addr = ceph_le64(P_ADDR_NULL.dev_addr);
 
+  using orig_type = paddr_t;
+
   paddr_le_t() = default;
   paddr_le_t(const paddr_t &addr) : dev_addr(ceph_le64(addr.dev_addr)) {}
 
@@ -800,6 +802,8 @@ constexpr laddr_t L_ADDR_LBAT = L_ADDR_MAX - 2;
 struct __attribute((packed)) laddr_le_t {
   ceph_le64 laddr = ceph_le64(L_ADDR_NULL);
 
+  using orig_type = laddr_t;
+
   laddr_le_t() = default;
   laddr_le_t(const laddr_le_t &) = default;
   explicit laddr_le_t(const laddr_t &addr)
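
Note on usage: after this refactor, a concrete tree only has to name its capacity, key/value types, block size, and itself (CRTP-style) when specializing the extracted templates; the split/merge/rebalance and delta-replay machinery lives in FixedKVInternalNode/FixedKVLeafNode. The following is a minimal sketch that mirrors the LBALeafNode hunk above; every Backref*/backref_* name, the capacity, and the extent type value are hypothetical placeholders for illustration, not part of this commit.

    // Illustrative sketch only -- names below are assumptions, not introduced
    // by this patch; the shape mirrors the LBALeafNode specialization above.
    constexpr size_t BACKREF_LEAF_CAPACITY = 169;   // assumed capacity
    constexpr size_t BACKREF_BLOCK_SIZE = 4096;     // assumed block size

    struct BackrefLeafNode
      : FixedKVLeafNode<
          BACKREF_LEAF_CAPACITY,
          paddr_t, paddr_le_t,                       // node key and its LE layout
          backref_map_val_t, backref_map_val_le_t,   // hypothetical value types
          BACKREF_BLOCK_SIZE,
          BackrefLeafNode> {                         // CRTP: concrete node type
      using Ref = TCachedExtentRef<BackrefLeafNode>;

      template <typename... T>
      BackrefLeafNode(T&&... t) : FixedKVLeafNode(std::forward<T>(t)...) {}

      static constexpr extent_types_t TYPE = extent_types_t::BACKREF_LEAF; // assumed enum value
      extent_types_t get_type() const final { return TYPE; }

      // A real specialization must still override the pure virtuals declared by
      // FixedKVLeafNode/FixedKVNode -- update(), insert(), remove(),
      // resolve_relative_addrs() -- exactly as LBALeafNode does above.
    };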