crimson/os/seastore: extract lba nodes out of lba manager
author     Xuehan Xu <xxhdx1985126@gmail.com>
           Tue, 8 Feb 2022 05:34:02 +0000 (13:34 +0800)
committer  Xuehan Xu <xxhdx1985126@gmail.com>
           Sun, 13 Mar 2022 09:16:53 +0000 (17:16 +0800)
Change lba nodes to general fixed-size-kv btree nodes

Signed-off-by: Xuehan Xu <xxhdx1985126@gmail.com>
src/crimson/os/seastore/btree/btree_range_pin.h
src/crimson/os/seastore/btree/fixed_kv_node.h [new file with mode: 0644]
src/crimson/os/seastore/lba_manager/btree/lba_btree_node.cc
src/crimson/os/seastore/lba_manager/btree/lba_btree_node.h
src/crimson/os/seastore/seastore_types.h
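
The diff below replaces the LBA-specific btree node classes with templates in fixed_kv_node.h, parameterized over capacity, key/value types, node size, and the concrete node type itself; the latter is what split/merge/duplicate use to allocate extents of the right class. A minimal sketch of that instantiation pattern, using invented toy names rather than the real seastore types:

#include <cstddef>
#include <cstdint>

// Hypothetical, stripped-down version of the pattern added in fixed_kv_node.h:
// the generic node takes the concrete subclass as a template parameter so that
// split/merge/duplicate can create instances of the right type (the real code
// allocates them via c.cache.alloc_new_extent<node_type_t>(...)).
template <std::size_t CAPACITY, typename KEY, typename VAL, typename node_type_t>
struct generic_leaf_node {
  // layout and journaling helpers would live here, as in FixedKVLeafNode
};

// A concrete tree then only binds the parameters, mirroring what LBALeafNode
// does later in this diff (toy_laddr_t/toy_mapping_t and the capacity of 128
// are invented for illustration).
using toy_laddr_t = std::uint64_t;
using toy_mapping_t = std::uint64_t;
struct toy_lba_leaf
  : generic_leaf_node<128, toy_laddr_t, toy_mapping_t, toy_lba_leaf> {};

int main() {
  toy_lba_leaf leaf;
  (void)leaf;
}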

index 4791a9b457ffd4ce3ede484ea11b4b976b42b5ca..24042946739b868d9b54be28c184354596554285 100644 (file)
@@ -60,6 +60,34 @@ inline std::ostream &operator<<(
             << ", depth=" << rhs.depth
             << ")";
 }
+
+/**
+ * fixed_kv_node_meta_le_t
+ *
+ * On disk layout for fixed_kv_node_meta_t
+ */
+template <typename bound_le_t>
+struct fixed_kv_node_meta_le_t {
+  bound_le_t begin = bound_le_t(0);
+  bound_le_t end = bound_le_t(0);
+  depth_le_t depth = init_depth_le(0);
+
+  fixed_kv_node_meta_le_t() = default;
+  fixed_kv_node_meta_le_t(
+    const fixed_kv_node_meta_le_t<bound_le_t> &) = default;
+  explicit fixed_kv_node_meta_le_t(
+    const fixed_kv_node_meta_t<typename bound_le_t::orig_type> &val)
+    : begin(ceph_le64(val.begin)),
+      end(ceph_le64(val.end)),
+      depth(init_depth_le(val.depth)) {}
+
+  operator fixed_kv_node_meta_t<typename bound_le_t::orig_type>() const {
+    return fixed_kv_node_meta_t<typename bound_le_t::orig_type>{
+           begin, end, depth };
+  }
+};
+
+
 /**
  * btree_range_pin_t
  *
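
fixed_kv_node_meta_le_t only requires that its bound_le_t parameter provide an orig_type alias naming the native key type (paddr_le_t and laddr_le_t gain that alias at the end of this diff) plus conversions in both directions. A self-contained sketch of the same round trip, with a toy little-endian stand-in instead of laddr_le_t/ceph_le64:

#include <cassert>
#include <cstdint>

// Toy stand-in for laddr_le_t/paddr_le_t: exposes orig_type and converts
// to/from the native-endian value (endianness handling omitted here).
struct toy_le_t {
  using orig_type = std::uint64_t;
  std::uint64_t v = 0;
  toy_le_t() = default;
  explicit toy_le_t(std::uint64_t x) : v(x) {}
  operator std::uint64_t() const { return v; }
};

// Toy native-side meta plus an on-disk counterpart shaped like
// fixed_kv_node_meta_le_t above (begin/end/depth, converting via orig_type).
template <typename T>
struct meta_t { T begin = 0, end = 0; std::uint32_t depth = 0; };

template <typename bound_le_t>
struct meta_le_t {
  bound_le_t begin, end;
  std::uint32_t depth = 0;
  meta_le_t() = default;
  explicit meta_le_t(const meta_t<typename bound_le_t::orig_type> &m)
    : begin(bound_le_t(m.begin)), end(bound_le_t(m.end)), depth(m.depth) {}
  operator meta_t<typename bound_le_t::orig_type>() const {
    return {begin, end, depth};
  }
};

int main() {
  meta_t<std::uint64_t> m{16, 4096, 2};
  meta_le_t<toy_le_t> ondisk(m);          // native -> on-disk layout
  meta_t<std::uint64_t> back = ondisk;    // on-disk -> native
  assert(back.begin == 16 && back.end == 4096 && back.depth == 2);
}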
diff --git a/src/crimson/os/seastore/btree/fixed_kv_node.h b/src/crimson/os/seastore/btree/fixed_kv_node.h
new file mode 100644 (file)
index 0000000..c1cde15
--- /dev/null
@@ -0,0 +1,448 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#include <sys/mman.h>
+#include <memory>
+#include <string.h>
+
+
+#include "include/buffer.h"
+
+#include "crimson/common/fixed_kv_node_layout.h"
+#include "crimson/common/errorator.h"
+#include "crimson/os/seastore/lba_manager.h"
+#include "crimson/os/seastore/seastore_types.h"
+#include "crimson/os/seastore/cache.h"
+#include "crimson/os/seastore/cached_extent.h"
+
+#include "crimson/os/seastore/btree/btree_range_pin.h"
+#include "crimson/os/seastore/btree/fixed_kv_btree.h"
+
+namespace crimson::os::seastore {
+
+/**
+ * FixedKVNode
+ *
+ * Base class enabling recursive lookup between internal and leaf nodes.
+ */
+template <typename node_key_t>
+struct FixedKVNode : CachedExtent {
+  using FixedKVNodeRef = TCachedExtentRef<FixedKVNode>;
+
+  btree_range_pin_t<node_key_t> pin;
+
+  FixedKVNode(ceph::bufferptr &&ptr) : CachedExtent(std::move(ptr)), pin(this) {}
+  FixedKVNode(const FixedKVNode &rhs)
+    : CachedExtent(rhs), pin(rhs.pin, this) {}
+
+  virtual fixed_kv_node_meta_t<node_key_t> get_node_meta() const = 0;
+
+  virtual ~FixedKVNode() = default;
+
+  void on_delta_write(paddr_t record_block_offset) final {
+    // All in-memory relative addrs are necessarily record-relative
+    assert(get_prior_instance());
+    pin.take_pin(get_prior_instance()->template cast<FixedKVNode>()->pin);
+    resolve_relative_addrs(record_block_offset);
+  }
+
+  void on_initial_write() final {
+    // All in-memory relative addrs are necessarily block-relative
+    resolve_relative_addrs(get_paddr());
+  }
+
+  void on_clean_read() final {
+    // From initial write of block, relative addrs are necessarily block-relative
+    resolve_relative_addrs(get_paddr());
+  }
+
+  virtual void resolve_relative_addrs(paddr_t base) = 0;
+};
+
+/**
+ * FixedKVInternalNode
+ *
+ * Abstracts operations on and layout of internal nodes for
+ * fixed-size-KV btrees.
+ */
+template <
+  size_t CAPACITY,
+  typename NODE_KEY,
+  typename NODE_KEY_LE,
+  size_t node_size,
+  typename node_type_t>
+struct FixedKVInternalNode
+  : FixedKVNode<NODE_KEY>,
+    common::FixedKVNodeLayout<
+      CAPACITY,
+      fixed_kv_node_meta_t<NODE_KEY>,
+      fixed_kv_node_meta_le_t<NODE_KEY_LE>,
+      NODE_KEY, NODE_KEY_LE,
+      paddr_t, paddr_le_t> {
+  using Ref = TCachedExtentRef<node_type_t>;
+  using node_layout_t =
+    common::FixedKVNodeLayout<
+      CAPACITY,
+      fixed_kv_node_meta_t<NODE_KEY>,
+      fixed_kv_node_meta_le_t<NODE_KEY_LE>,
+      NODE_KEY,
+      NODE_KEY_LE,
+      paddr_t,
+      paddr_le_t>;
+  using internal_const_iterator_t = typename node_layout_t::const_iterator;
+  using internal_iterator_t = typename node_layout_t::iterator;
+  template <typename... T>
+  FixedKVInternalNode(T&&... t) :
+    FixedKVNode<NODE_KEY>(std::forward<T>(t)...),
+    node_layout_t(this->get_bptr().c_str()) {}
+
+  virtual ~FixedKVInternalNode() {}
+
+  fixed_kv_node_meta_t<NODE_KEY> get_node_meta() const {
+    return this->get_meta();
+  }
+
+  typename node_layout_t::delta_buffer_t delta_buffer;
+  typename node_layout_t::delta_buffer_t *maybe_get_delta_buffer() {
+    return this->is_mutation_pending() 
+           ? &delta_buffer : nullptr;
+  }
+
+  CachedExtentRef duplicate_for_write() override {
+    assert(delta_buffer.empty());
+    return CachedExtentRef(new node_type_t(*this));
+  };
+
+  void update(
+    internal_const_iterator_t iter,
+    paddr_t addr) {
+    return this->journal_update(
+      iter,
+      this->maybe_generate_relative(addr),
+      maybe_get_delta_buffer());
+  }
+
+  void insert(
+    internal_const_iterator_t iter,
+    NODE_KEY pivot,
+    paddr_t addr) {
+    return this->journal_insert(
+      iter,
+      pivot,
+      this->maybe_generate_relative(addr),
+      maybe_get_delta_buffer());
+  }
+
+  void remove(internal_const_iterator_t iter) {
+    return this->journal_remove(
+      iter,
+      maybe_get_delta_buffer());
+  }
+
+  void replace(
+    internal_const_iterator_t iter,
+    NODE_KEY pivot,
+    paddr_t addr) {
+    return this->journal_replace(
+      iter,
+      pivot,
+      this->maybe_generate_relative(addr),
+      maybe_get_delta_buffer());
+  }
+
+  std::tuple<Ref, Ref, NODE_KEY>
+  make_split_children(op_context_t<NODE_KEY> c) {
+    auto left = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    auto right = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    auto pivot = this->split_into(*left, *right);
+    left->pin.set_range(left->get_meta());
+    right->pin.set_range(right->get_meta());
+    return std::make_tuple(
+      left,
+      right,
+      pivot);
+  }
+
+  Ref make_full_merge(
+    op_context_t<NODE_KEY> c,
+    Ref &right) {
+    auto replacement = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    replacement->merge_from(*this, *right->template cast<node_type_t>());
+    replacement->pin.set_range(replacement->get_meta());
+    return replacement;
+  }
+
+  std::tuple<Ref, Ref, NODE_KEY>
+  make_balanced(
+    op_context_t<NODE_KEY> c,
+    Ref &_right,
+    bool prefer_left) {
+    ceph_assert(_right->get_type() == this->get_type());
+    auto &right = *_right->template cast<node_type_t>();
+    auto replacement_left = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    auto replacement_right = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+
+    auto pivot = this->balance_into_new_nodes(
+      *this,
+      right,
+      prefer_left,
+      *replacement_left,
+      *replacement_right);
+
+    replacement_left->pin.set_range(replacement_left->get_meta());
+    replacement_right->pin.set_range(replacement_right->get_meta());
+    return std::make_tuple(
+      replacement_left,
+      replacement_right,
+      pivot);
+  }
+
+  /**
+   * Internal relative addresses on read or in memory prior to commit
+   * are either record or block relative depending on whether this
+   * physical node is is_initial_pending() or just is_pending().
+   *
+   * User passes appropriate base depending on lifecycle and
+   * resolve_relative_addrs fixes up relative internal references
+   * based on base.
+   */
+  void resolve_relative_addrs(paddr_t base)
+  {
+    LOG_PREFIX(FixedKVInternalNode::resolve_relative_addrs);
+    for (auto i: *this) {
+      if (i->get_val().is_relative()) {
+       auto updated = base.add_relative(i->get_val());
+       SUBTRACE(seastore_lba_details, "{} -> {}", i->get_val(), updated);
+       i->set_val(updated);
+      }
+    }
+  }
+
+  void node_resolve_vals(
+    internal_iterator_t from,
+    internal_iterator_t to) const {
+    if (this->is_initial_pending()) {
+      for (auto i = from; i != to; ++i) {
+       if (i->get_val().is_relative()) {
+         assert(i->get_val().is_block_relative());
+         i->set_val(this->get_paddr().add_relative(i->get_val()));
+       }
+      }
+    }
+  }
+  void node_unresolve_vals(
+    internal_iterator_t from,
+    internal_iterator_t to) const {
+    if (this->is_initial_pending()) {
+      for (auto i = from; i != to; ++i) {
+       if (i->get_val().is_relative()) {
+         assert(i->get_val().is_record_relative());
+         i->set_val(i->get_val() - this->get_paddr());
+       }
+      }
+    }
+  }
+
+  std::ostream &print_detail(std::ostream &out) const
+  {
+    return out << ", size=" << this->get_size()
+              << ", meta=" << this->get_meta();
+  }
+
+  ceph::bufferlist get_delta() {
+    ceph::buffer::ptr bptr(delta_buffer.get_bytes());
+    delta_buffer.copy_out(bptr.c_str(), bptr.length());
+    ceph::bufferlist bl;
+    bl.push_back(bptr);
+    return bl;
+  }
+
+  void apply_delta_and_adjust_crc(
+    paddr_t base, const ceph::bufferlist &_bl) {
+    assert(_bl.length());
+    ceph::bufferlist bl = _bl;
+    bl.rebuild();
+    typename node_layout_t::delta_buffer_t buffer;
+    buffer.copy_in(bl.front().c_str(), bl.front().length());
+    buffer.replay(*this);
+    this->set_last_committed_crc(this->get_crc32c());
+    resolve_relative_addrs(base);
+  }
+
+  constexpr static size_t get_min_capacity() {
+    return (node_layout_t::get_capacity() - 1) / 2;
+  }
+
+  bool at_max_capacity() const {
+    assert(this->get_size() <= node_layout_t::get_capacity());
+    return this->get_size() == node_layout_t::get_capacity();
+  }
+
+  bool at_min_capacity() const {
+    assert(this->get_size() >= (get_min_capacity() - 1));
+    return this->get_size() <= get_min_capacity();
+  }
+
+  bool below_min_capacity() const {
+    assert(this->get_size() >= (get_min_capacity() - 1));
+    return this->get_size() < get_min_capacity();
+  }
+};
+
+template <
+  size_t CAPACITY,
+  typename NODE_KEY,
+  typename NODE_KEY_LE,
+  typename VAL,
+  typename VAL_LE,
+  size_t node_size,
+  typename node_type_t>
+struct FixedKVLeafNode
+  : FixedKVNode<NODE_KEY>,
+    common::FixedKVNodeLayout<
+      CAPACITY,
+      fixed_kv_node_meta_t<NODE_KEY>,
+      fixed_kv_node_meta_le_t<NODE_KEY_LE>,
+      NODE_KEY, NODE_KEY_LE,
+      VAL, VAL_LE> {
+  using Ref = TCachedExtentRef<node_type_t>;
+  using node_layout_t =
+    common::FixedKVNodeLayout<
+      CAPACITY,
+      fixed_kv_node_meta_t<NODE_KEY>,
+      fixed_kv_node_meta_le_t<NODE_KEY_LE>,
+      NODE_KEY,
+      NODE_KEY_LE,
+      VAL,
+      VAL_LE>;
+  using internal_const_iterator_t = typename node_layout_t::const_iterator;
+  template <typename... T>
+  FixedKVLeafNode(T&&... t) :
+    FixedKVNode<NODE_KEY>(std::forward<T>(t)...),
+    node_layout_t(this->get_bptr().c_str()) {}
+
+  virtual ~FixedKVLeafNode() {}
+
+  fixed_kv_node_meta_t<NODE_KEY> get_node_meta() const {
+    return this->get_meta();
+  }
+
+  typename node_layout_t::delta_buffer_t delta_buffer;
+  virtual typename node_layout_t::delta_buffer_t *maybe_get_delta_buffer() {
+    return this->is_mutation_pending() ? &delta_buffer : nullptr;
+  }
+
+  CachedExtentRef duplicate_for_write() override {
+    assert(delta_buffer.empty());
+    return CachedExtentRef(new node_type_t(*this));
+  };
+
+  virtual void update(
+    internal_const_iterator_t iter,
+    VAL val) = 0;
+  virtual internal_const_iterator_t insert(
+    internal_const_iterator_t iter,
+    NODE_KEY addr,
+    VAL val) = 0;
+  virtual void remove(internal_const_iterator_t iter) = 0;
+
+  std::tuple<Ref, Ref, NODE_KEY>
+  make_split_children(op_context_t<NODE_KEY> c) {
+    auto left = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    auto right = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    auto pivot = this->split_into(*left, *right);
+    left->pin.set_range(left->get_meta());
+    right->pin.set_range(right->get_meta());
+    return std::make_tuple(
+      left,
+      right,
+      pivot);
+  }
+
+  Ref make_full_merge(
+    op_context_t<NODE_KEY> c,
+    Ref &right) {
+    auto replacement = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    replacement->merge_from(*this, *right->template cast<node_type_t>());
+    replacement->pin.set_range(replacement->get_meta());
+    return replacement;
+  }
+
+  std::tuple<Ref, Ref, NODE_KEY>
+  make_balanced(
+    op_context_t<NODE_KEY> c,
+    Ref &_right,
+    bool prefer_left) {
+    ceph_assert(_right->get_type() == this->get_type());
+    auto &right = *_right->template cast<node_type_t>();
+    auto replacement_left = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+    auto replacement_right = c.cache.template alloc_new_extent<node_type_t>(
+      c.trans, node_size);
+
+    auto pivot = this->balance_into_new_nodes(
+      *this,
+      right,
+      prefer_left,
+      *replacement_left,
+      *replacement_right);
+
+    replacement_left->pin.set_range(replacement_left->get_meta());
+    replacement_right->pin.set_range(replacement_right->get_meta());
+    return std::make_tuple(
+      replacement_left,
+      replacement_right,
+      pivot);
+  }
+
+  ceph::bufferlist get_delta() {
+    ceph::buffer::ptr bptr(delta_buffer.get_bytes());
+    delta_buffer.copy_out(bptr.c_str(), bptr.length());
+    ceph::bufferlist bl;
+    bl.push_back(bptr);
+    return bl;
+  }
+
+  void apply_delta_and_adjust_crc(
+    paddr_t base, const ceph::bufferlist &_bl) {
+    assert(_bl.length());
+    ceph::bufferlist bl = _bl;
+    bl.rebuild();
+    typename node_layout_t::delta_buffer_t buffer;
+    buffer.copy_in(bl.front().c_str(), bl.front().length());
+    buffer.replay(*this);
+    this->set_last_committed_crc(this->get_crc32c());
+    this->resolve_relative_addrs(base);
+  }
+
+  constexpr static size_t get_min_capacity() {
+    return (node_layout_t::get_capacity() - 1) / 2;
+  }
+
+  bool at_max_capacity() const {
+    assert(this->get_size() <= node_layout_t::get_capacity());
+    return this->get_size() == node_layout_t::get_capacity();
+  }
+
+  bool at_min_capacity() const {
+    assert(this->get_size() >= (get_min_capacity() - 1));
+    return this->get_size() <= get_min_capacity();
+  }
+
+  bool below_min_capacity() const {
+    assert(this->get_size() >= (get_min_capacity() - 1));
+    return this->get_size() < get_min_capacity();
+  }
+};
+
+} // namespace crimson::os::seastore
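
Both templates above share the same occupancy rules: a node is full at get_capacity() entries and "at min" once it holds no more than (capacity - 1) / 2 of them. For the internal-node capacity of 254 used by the LBA instantiation later in this diff, the arithmetic works out as below (a standalone check, not seastore code):

#include <cstddef>

// Mirrors get_min_capacity() / at_min_capacity() / below_min_capacity()
// in FixedKVInternalNode and FixedKVLeafNode.
constexpr std::size_t min_capacity(std::size_t capacity) {
  return (capacity - 1) / 2;
}
constexpr bool at_min(std::size_t size, std::size_t capacity) {
  return size <= min_capacity(capacity);
}
constexpr bool below_min(std::size_t size, std::size_t capacity) {
  return size < min_capacity(capacity);
}

// INTERNAL_NODE_CAPACITY is 254 for the LBA tree, so an internal node is at
// minimum capacity with 126 entries and below minimum with 125 or fewer.
static_assert(min_capacity(254) == 126);
static_assert(at_min(126, 254) && !below_min(126, 254));
static_assert(below_min(125, 254));

int main() {}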
index a6ee25e9170240583c7738dcfa0f99a2a7bf1656..e3e69421fc641d27711f7548db7caa32b09ba79b 100644 (file)
@@ -27,24 +27,6 @@ std::ostream& operator<<(std::ostream& out, const lba_map_val_t& v)
              << ")";
 }
 
-std::ostream &LBAInternalNode::print_detail(std::ostream &out) const
-{
-  return out << ", size=" << get_size()
-            << ", meta=" << get_meta();
-}
-
-void LBAInternalNode::resolve_relative_addrs(paddr_t base)
-{
-  LOG_PREFIX(LBAInternalNode::resolve_relative_addrs);
-  for (auto i: *this) {
-    if (i->get_val().is_relative()) {
-      auto updated = base.add_relative(i->get_val());
-      TRACE("{} -> {}", i->get_val(), updated);
-      i->set_val(updated);
-    }
-  }
-}
-
 std::ostream &LBALeafNode::print_detail(std::ostream &out) const
 {
   return out << ", size=" << get_size()
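
LBAInternalNode::resolve_relative_addrs and print_detail leave the .cc because FixedKVInternalNode above now provides them. The rebasing they perform is straightforward: any value stored as a relative address is rebased against the node's own paddr (initial write / clean read) or the enclosing record's offset (delta write), while absolute values are left alone. A toy sketch of that rebasing with invented address types:

#include <cassert>
#include <cstdint>
#include <vector>

// Toy addresses: "relative" values get rebased onto a base once the final
// placement is known, mirroring resolve_relative_addrs(paddr_t base).
struct toy_addr {
  bool relative = false;
  std::uint64_t off = 0;
};

// Rebase every relative value against `base`, as the template does for each
// child pointer via base.add_relative(i->get_val()).
void resolve_relative(std::vector<toy_addr> &vals, std::uint64_t base) {
  for (auto &v : vals) {
    if (v.relative) {
      v = {false, base + v.off};
    }
  }
}

int main() {
  std::vector<toy_addr> vals = {{true, 0x100}, {false, 0xdead0000}};
  resolve_relative(vals, /*block or record base*/ 0x40000000);
  assert(!vals[0].relative && vals[0].off == 0x40000100);
  assert(vals[1].off == 0xdead0000);   // absolute entries are untouched
}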
index 8b2530e7c91e4221acb07e015933316769677e1a..004a5778001c84f7e01780ea9e3b90b3a752e369 100644 (file)
 
 #include "crimson/os/seastore/btree/btree_range_pin.h"
 #include "crimson/os/seastore/btree/fixed_kv_btree.h"
+#include "crimson/os/seastore/btree/fixed_kv_node.h"
 
 namespace crimson::os::seastore::lba_manager::btree {
 
 using base_iertr = LBAManager::base_iertr;
+using LBANode = FixedKVNode<laddr_t>;
 
 /**
  * lba_map_val_t
@@ -54,66 +56,9 @@ std::ostream& operator<<(std::ostream& out, const lba_map_val_t&);
 
 constexpr size_t LBA_BLOCK_SIZE = 4096;
 
-/**
- * lba_node_meta_le_t
- *
- * On disk layout for fixed_kv_node_meta_t
- */
-struct lba_node_meta_le_t {
-  laddr_le_t begin = laddr_le_t(0);
-  laddr_le_t end = laddr_le_t(0);
-  depth_le_t depth = init_depth_le(0);
-
-  lba_node_meta_le_t() = default;
-  lba_node_meta_le_t(const lba_node_meta_le_t &) = default;
-  explicit lba_node_meta_le_t(const fixed_kv_node_meta_t<laddr_t> &val)
-    : begin(ceph_le64(val.begin)),
-      end(ceph_le64(val.end)),
-      depth(init_depth_le(val.depth)) {}
-
-  operator fixed_kv_node_meta_t<laddr_t>() const {
-    return fixed_kv_node_meta_t<laddr_t>{ begin, end, depth };
-  }
-};
-
-/**
- * LBANode
- *
- * Base class enabling recursive lookup between internal and leaf nodes.
- */
-struct LBANode : CachedExtent {
-  using LBANodeRef = TCachedExtentRef<LBANode>;
-
-  btree_range_pin_t<laddr_t> pin;
-
-  LBANode(ceph::bufferptr &&ptr) : CachedExtent(std::move(ptr)), pin(this) {}
-  LBANode(const LBANode &rhs)
-    : CachedExtent(rhs), pin(rhs.pin, this) {}
-
-  virtual fixed_kv_node_meta_t<laddr_t> get_node_meta() const = 0;
+using lba_node_meta_t = fixed_kv_node_meta_t<laddr_t>;
 
-  virtual ~LBANode() = default;
-
-  void on_delta_write(paddr_t record_block_offset) final {
-    // All in-memory relative addrs are necessarily record-relative
-    assert(get_prior_instance());
-    pin.take_pin(get_prior_instance()->cast<LBANode>()->pin);
-    resolve_relative_addrs(record_block_offset);
-  }
-
-  void on_initial_write() final {
-    // All in-memory relative addrs are necessarily block-relative
-    resolve_relative_addrs(get_paddr());
-  }
-
-  void on_clean_read() final {
-    // From initial write of block, relative addrs are necessarily block-relative
-    resolve_relative_addrs(get_paddr());
-  }
-
-  virtual void resolve_relative_addrs(paddr_t base) = 0;
-};
-using LBANodeRef = LBANode::LBANodeRef;
+using lba_node_meta_le_t = fixed_kv_node_meta_le_t<laddr_le_t>;
 
 /**
  * LBAInternalNode
@@ -134,197 +79,22 @@ using LBANodeRef = LBANode::LBANodeRef;
  */
 constexpr size_t INTERNAL_NODE_CAPACITY = 254;
 struct LBAInternalNode
-  : LBANode,
-    common::FixedKVNodeLayout<
+  : FixedKVInternalNode<
       INTERNAL_NODE_CAPACITY,
-      fixed_kv_node_meta_t<laddr_t>, lba_node_meta_le_t,
       laddr_t, laddr_le_t,
-      paddr_t, paddr_le_t> {
+      LBA_BLOCK_SIZE,
+      LBAInternalNode> {
   using Ref = TCachedExtentRef<LBAInternalNode>;
   using internal_iterator_t = const_iterator;
   template <typename... T>
   LBAInternalNode(T&&... t) :
-    LBANode(std::forward<T>(t)...),
-    FixedKVNodeLayout(get_bptr().c_str()) {}
+    FixedKVInternalNode(std::forward<T>(t)...) {}
 
   static constexpr extent_types_t TYPE = extent_types_t::LADDR_INTERNAL;
 
-  fixed_kv_node_meta_t<laddr_t> get_node_meta() const { return get_meta(); }
-
-  CachedExtentRef duplicate_for_write() final {
-    assert(delta_buffer.empty());
-    return CachedExtentRef(new LBAInternalNode(*this));
-  };
-
-  delta_buffer_t delta_buffer;
-  delta_buffer_t *maybe_get_delta_buffer() {
-    return is_mutation_pending() ? &delta_buffer : nullptr;
-  }
-
-  void update(
-    const_iterator iter,
-    paddr_t addr) {
-    return journal_update(
-      iter,
-      maybe_generate_relative(addr),
-      maybe_get_delta_buffer());
-  }
-
-  void insert(
-    const_iterator iter,
-    laddr_t pivot,
-    paddr_t addr) {
-    return journal_insert(
-      iter,
-      pivot,
-      maybe_generate_relative(addr),
-      maybe_get_delta_buffer());
-  }
-
-  void remove(const_iterator iter) {
-    return journal_remove(
-      iter,
-      maybe_get_delta_buffer());
-  }
-
-  void replace(
-    const_iterator iter,
-    laddr_t pivot,
-    paddr_t addr) {
-    return journal_replace(
-      iter,
-      pivot,
-      maybe_generate_relative(addr),
-      maybe_get_delta_buffer());
-  }
-
-  std::tuple<Ref, Ref, laddr_t>
-  make_split_children(op_context_t<laddr_t> c) {
-    auto left = c.cache.alloc_new_extent<LBAInternalNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    auto right = c.cache.alloc_new_extent<LBAInternalNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    auto pivot = split_into(*left, *right);
-    left->pin.set_range(left->get_meta());
-    right->pin.set_range(right->get_meta());
-    return std::make_tuple(
-      left,
-      right,
-      pivot);
-  }
-
-  Ref make_full_merge(
-    op_context_t<laddr_t> c,
-    Ref &right) {
-    auto replacement = c.cache.alloc_new_extent<LBAInternalNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    replacement->merge_from(*this, *right->cast<LBAInternalNode>());
-    replacement->pin.set_range(replacement->get_meta());
-    return replacement;
-  }
-
-  std::tuple<Ref, Ref, laddr_t>
-  make_balanced(
-    op_context_t<laddr_t> c,
-    Ref &_right,
-    bool prefer_left) {
-    ceph_assert(_right->get_type() == get_type());
-    auto &right = *_right->cast<LBAInternalNode>();
-    auto replacement_left = c.cache.alloc_new_extent<LBAInternalNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    auto replacement_right = c.cache.alloc_new_extent<LBAInternalNode>(
-      c.trans, LBA_BLOCK_SIZE);
-
-    auto pivot = balance_into_new_nodes(
-      *this,
-      right,
-      prefer_left,
-      *replacement_left,
-      *replacement_right);
-
-    replacement_left->pin.set_range(replacement_left->get_meta());
-    replacement_right->pin.set_range(replacement_right->get_meta());
-    return std::make_tuple(
-      replacement_left,
-      replacement_right,
-      pivot);
-  }
-
-  /**
-   * Internal relative addresses on read or in memory prior to commit
-   * are either record or block relative depending on whether this
-   * physical node is is_initial_pending() or just is_pending().
-   *
-   * User passes appropriate base depending on lifecycle and
-   * resolve_relative_addrs fixes up relative internal references
-   * based on base.
-   */
-  void resolve_relative_addrs(paddr_t base);
-  void node_resolve_vals(iterator from, iterator to) const final {
-    if (is_initial_pending()) {
-      for (auto i = from; i != to; ++i) {
-       if (i->get_val().is_relative()) {
-         assert(i->get_val().is_block_relative());
-         i->set_val(get_paddr().add_relative(i->get_val()));
-       }
-      }
-    }
-  }
-  void node_unresolve_vals(iterator from, iterator to) const final {
-    if (is_initial_pending()) {
-      for (auto i = from; i != to; ++i) {
-       if (i->get_val().is_relative()) {
-         assert(i->get_val().is_record_relative());
-         i->set_val(i->get_val() - get_paddr());
-       }
-      }
-    }
-  }
-
   extent_types_t get_type() const final {
     return TYPE;
   }
-
-  std::ostream &print_detail(std::ostream &out) const final;
-
-  ceph::bufferlist get_delta() final {
-    ceph::buffer::ptr bptr(delta_buffer.get_bytes());
-    delta_buffer.copy_out(bptr.c_str(), bptr.length());
-    ceph::bufferlist bl;
-    bl.push_back(bptr);
-    return bl;
-  }
-
-  void apply_delta_and_adjust_crc(
-    paddr_t base, const ceph::bufferlist &_bl) final {
-    assert(_bl.length());
-    ceph::bufferlist bl = _bl;
-    bl.rebuild();
-    delta_buffer_t buffer;
-    buffer.copy_in(bl.front().c_str(), bl.front().length());
-    buffer.replay(*this);
-    set_last_committed_crc(get_crc32c());
-    resolve_relative_addrs(base);
-  }
-
-  constexpr static size_t get_min_capacity() {
-    return (get_capacity() - 1) / 2;
-  }
-
-  bool at_max_capacity() const {
-    assert(get_size() <= get_capacity());
-    return get_size() == get_capacity();
-  }
-
-  bool at_min_capacity() const {
-    assert(get_size() >= (get_min_capacity() - 1));
-    return get_size() <= get_min_capacity();
-  }
-
-  bool below_min_capacity() const {
-    assert(get_size() >= (get_min_capacity() - 1));
-    return get_size() < get_min_capacity();
-  }
 };
 using LBAInternalNodeRef = LBAInternalNode::Ref;
 
@@ -372,36 +142,23 @@ struct lba_map_val_le_t {
 };
 
 struct LBALeafNode
-  : LBANode,
-    common::FixedKVNodeLayout<
+  : FixedKVLeafNode<
       LEAF_NODE_CAPACITY,
-      fixed_kv_node_meta_t<laddr_t>, lba_node_meta_le_t,
       laddr_t, laddr_le_t,
-      lba_map_val_t, lba_map_val_le_t> {
+      lba_map_val_t, lba_map_val_le_t,
+      LBA_BLOCK_SIZE,
+      LBALeafNode> {
   using Ref = TCachedExtentRef<LBALeafNode>;
   using internal_iterator_t = const_iterator;
   template <typename... T>
   LBALeafNode(T&&... t) :
-    LBANode(std::forward<T>(t)...),
-    FixedKVNodeLayout(get_bptr().c_str()) {}
+    FixedKVLeafNode(std::forward<T>(t)...) {}
 
   static constexpr extent_types_t TYPE = extent_types_t::LADDR_LEAF;
 
-  fixed_kv_node_meta_t<laddr_t> get_node_meta() const { return get_meta(); }
-
-  CachedExtentRef duplicate_for_write() final {
-    assert(delta_buffer.empty());
-    return CachedExtentRef(new LBALeafNode(*this));
-  };
-
-  delta_buffer_t delta_buffer;
-  delta_buffer_t *maybe_get_delta_buffer() {
-    return is_mutation_pending() ? &delta_buffer : nullptr;
-  }
-
   void update(
     const_iterator iter,
-    lba_map_val_t val) {
+    lba_map_val_t val) final {
     val.paddr = maybe_generate_relative(val.paddr);
     return journal_update(
       iter,
@@ -409,10 +166,10 @@ struct LBALeafNode
       maybe_get_delta_buffer());
   }
 
-  auto insert(
+  const_iterator insert(
     const_iterator iter,
     laddr_t addr,
-    lba_map_val_t val) {
+    lba_map_val_t val) final {
     val.paddr = maybe_generate_relative(val.paddr);
     journal_insert(
       iter,
@@ -422,65 +179,12 @@ struct LBALeafNode
     return iter;
   }
 
-  void remove(const_iterator iter) {
+  void remove(const_iterator iter) final {
     return journal_remove(
       iter,
       maybe_get_delta_buffer());
   }
 
-
-  std::tuple<Ref, Ref, laddr_t>
-  make_split_children(op_context_t<laddr_t> c) {
-    auto left = c.cache.alloc_new_extent<LBALeafNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    auto right = c.cache.alloc_new_extent<LBALeafNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    auto pivot = split_into(*left, *right);
-    left->pin.set_range(left->get_meta());
-    right->pin.set_range(right->get_meta());
-    return std::make_tuple(
-      left,
-      right,
-      pivot);
-  }
-
-  Ref make_full_merge(
-    op_context_t<laddr_t> c,
-    Ref &right) {
-    auto replacement = c.cache.alloc_new_extent<LBALeafNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    replacement->merge_from(*this, *right->cast<LBALeafNode>());
-    replacement->pin.set_range(replacement->get_meta());
-    return replacement;
-  }
-
-  std::tuple<Ref, Ref, laddr_t>
-  make_balanced(
-    op_context_t<laddr_t> c,
-    Ref &_right,
-    bool prefer_left) {
-    ceph_assert(_right->get_type() == get_type());
-    auto &right = *_right->cast<LBALeafNode>();
-    auto replacement_left = c.cache.alloc_new_extent<LBALeafNode>(
-      c.trans, LBA_BLOCK_SIZE);
-    auto replacement_right = c.cache.alloc_new_extent<LBALeafNode>(
-      c.trans, LBA_BLOCK_SIZE);
-
-    auto pivot = balance_into_new_nodes(
-      *this,
-      right,
-      prefer_left,
-      *replacement_left,
-      *replacement_right);
-
-    replacement_left->pin.set_range(replacement_left->get_meta());
-    replacement_right->pin.set_range(replacement_right->get_meta());
-    return std::make_tuple(
-      replacement_left,
-      replacement_right,
-      pivot);
-  }
-
   // See LBAInternalNode, same concept
   void resolve_relative_addrs(paddr_t base);
   void node_resolve_vals(iterator from, iterator to) const final {
@@ -509,50 +213,11 @@ struct LBALeafNode
     }
   }
 
-  ceph::bufferlist get_delta() final {
-    ceph::buffer::ptr bptr(delta_buffer.get_bytes());
-    delta_buffer.copy_out(bptr.c_str(), bptr.length());
-    ceph::bufferlist bl;
-    bl.push_back(bptr);
-    return bl;
-  }
-
-  void apply_delta_and_adjust_crc(
-    paddr_t base, const ceph::bufferlist &_bl) final {
-    assert(_bl.length());
-    ceph::bufferlist bl = _bl;
-    bl.rebuild();
-    delta_buffer_t buffer;
-    buffer.copy_in(bl.front().c_str(), bl.front().length());
-    buffer.replay(*this);
-    set_last_committed_crc(get_crc32c());
-    resolve_relative_addrs(base);
-  }
-
   extent_types_t get_type() const final {
     return TYPE;
   }
 
   std::ostream &print_detail(std::ostream &out) const final;
-
-  constexpr static size_t get_min_capacity() {
-    return (get_capacity() - 1) / 2;
-  }
-
-  bool at_max_capacity() const {
-    assert(get_size() <= get_capacity());
-    return get_size() == get_capacity();
-  }
-
-  bool at_min_capacity() const {
-    assert(get_size() >= (get_min_capacity() - 1));
-    return get_size() <= get_min_capacity();
-  }
-
-  bool below_min_capacity() const {
-    assert(get_size() >= (get_min_capacity() - 1));
-    return get_size() < get_min_capacity();
-  }
 };
 using LBALeafNodeRef = TCachedExtentRef<LBALeafNode>;
 
index 646f78b76af5050aa9f8be489c995d41d7d1decd..b875099e88df538d72e991945bacf6d5fdb042b9 100644 (file)
@@ -712,6 +712,8 @@ struct __attribute((packed)) paddr_le_t {
   ceph_le64 dev_addr =
     ceph_le64(P_ADDR_NULL.dev_addr);
 
+  using orig_type = paddr_t;
+
   paddr_le_t() = default;
   paddr_le_t(const paddr_t &addr) : dev_addr(ceph_le64(addr.dev_addr)) {}
 
@@ -800,6 +802,8 @@ constexpr laddr_t L_ADDR_LBAT = L_ADDR_MAX - 2;
 struct __attribute((packed)) laddr_le_t {
   ceph_le64 laddr = ceph_le64(L_ADDR_NULL);
 
+  using orig_type = laddr_t;
+
   laddr_le_t() = default;
   laddr_le_t(const laddr_le_t &) = default;
   explicit laddr_le_t(const laddr_t &addr)
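
The orig_type aliases added to paddr_le_t and laddr_le_t are what allow fixed_kv_node_meta_le_t to name the matching native type. Assuming these types remain in the crimson::os::seastore namespace, the relationship can be spelled out as compile-time checks (illustrative only):

#include <type_traits>
#include "crimson/os/seastore/seastore_types.h"

using namespace crimson::os::seastore;

// With the aliases above, fixed_kv_node_meta_le_t<laddr_le_t> converts to
// fixed_kv_node_meta_t<laddr_t>, just as the removed lba_node_meta_le_t did,
// and fixed_kv_node_meta_le_t<paddr_le_t> would pair with paddr_t.
static_assert(std::is_same_v<paddr_le_t::orig_type, paddr_t>);
static_assert(std::is_same_v<laddr_le_t::orig_type, laddr_t>);

int main() {}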