From: Yingxin Cheng
Date: Mon, 7 Jun 2021 05:15:58 +0000 (+0800)
Subject: crimson/onode-staged-tree: pass node_size to lower node stages
X-Git-Tag: v17.1.0~1674^2~10
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=55605f6e340e7522406f14a042cdbe60dab84ff1;p=ceph-ci.git

crimson/onode-staged-tree: pass node_size to lower node stages

Signed-off-by: Yingxin Cheng
---

diff --git a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/item_iterator_stage.h b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/item_iterator_stage.h
index 769c66d6547..d54a8bacd90 100644
--- a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/item_iterator_stage.h
+++ b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/item_iterator_stage.h
@@ -38,8 +38,10 @@ class item_iterator_t {
   using value_input_t = value_input_type_t;
   using value_t = value_type_t;
  public:
-  item_iterator_t(const memory_range_t& range)
-    : p_items_start(range.p_start), p_items_end(range.p_end) {
+  item_iterator_t(const container_range_t& range)
+    : node_size{range.node_size},
+      p_items_start(range.range.p_start),
+      p_items_end(range.range.p_end) {
     assert(p_items_start < p_items_end);
     next_item_range(p_items_end);
   }
@@ -73,8 +75,8 @@ class item_iterator_t {
   node_offset_t size_overhead() const {
     return sizeof(node_offset_t) + get_key().size_overhead();
   }
-  memory_range_t get_nxt_container() const {
-    return {item_range.p_start, get_key().p_start()};
+  container_range_t get_nxt_container() const {
+    return {{item_range.p_start, get_key().p_start()}, node_size};
   }
   bool has_next() const {
     assert(p_items_start <= item_range.p_start);
@@ -109,8 +111,9 @@ class item_iterator_t {
     index_t index;
     ceph::decode(index, delta);
-    item_iterator_t ret({p_node_start + start_offset,
-                         p_node_start + end_offset});
+    item_iterator_t ret({{p_node_start + start_offset,
+                          p_node_start + end_offset},
+                         node_size});
     while (index > 0) {
       ++ret;
       --index;
     }
@@ -156,6 +159,7 @@ class item_iterator_t {
     item_range = {p_item_start, p_item_end};
   }
 
+  extent_len_t node_size;
   const char* p_items_start;
   const char* p_items_end;
   mutable memory_range_t item_range;
diff --git a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage.cc b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage.cc
index 246e5eb4640..cb1d1db5314 100644
--- a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage.cc
+++ b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage.cc
@@ -46,7 +46,7 @@ node_offset_t NODE_T::size_to_nxt_at(index_t index) const
 }
 
 template
-memory_range_t NODE_T::get_nxt_container(index_t index) const
+container_range_t NODE_T::get_nxt_container(index_t index) const
 {
   if constexpr (std::is_same_v) {
     ceph_abort("N3 internal node doesn't have the right part");
@@ -65,7 +65,7 @@ memory_range_t NODE_T::get_nxt_container(index_t index) const
     } else {
       // range for item_iterator_t
     }
-    return {item_p_start, item_p_end};
+    return {{item_p_start, item_p_end}, node_size};
   }
 }
 
diff --git a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage.h b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage.h
index 35e5598f0b5..5e05c678bbe 100644
--- a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage.h
+++ b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage.h
@@ -98,7 +98,7 @@ class node_extent_t {
   node_offset_t size_to_nxt_at(index_t index) const;
   node_offset_t size_overhead_at(index_t index) const {
     return FieldType::ITEM_OVERHEAD; }
-  memory_range_t get_nxt_container(index_t index) const;
+  container_range_t get_nxt_container(index_t index) const;
 
   template
   std::enable_if_t
@@ -107,7 +107,7 @@ class node_extent_t {
     if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
       return p_fields->get_p_child_addr(index, node_size);
     } else {
-      auto range = get_nxt_container(index);
+      auto range = get_nxt_container(index).range;
       auto ret = reinterpret_cast(range.p_start);
       assert(range.p_start + ret->allocation_size() == range.p_end);
       return ret;
diff --git a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/stage.h b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/stage.h
index 8b57562fca7..818810b0e86 100644
--- a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/stage.h
+++ b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/stage.h
@@ -859,7 +859,7 @@ struct staged {
    *   size() -> node_offset_t
    *   size_overhead() -> node_offset_t
    *   (IS_BOTTOM) get_p_value() -> const value_t*
-   *   (!IS_BOTTOM) get_nxt_container() -> nxt_stage::container_t
+   *   (!IS_BOTTOM) get_nxt_container() -> container_range_t
    *   (!IS_BOTTOM) size_to_nxt() -> node_offset_t
    * seek:
    *   operator++() -> iterator_t&
diff --git a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/stage_types.h b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/stage_types.h
index b2ca8d35577..94017bb6007 100644
--- a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/stage_types.h
+++ b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/stage_types.h
@@ -395,6 +395,11 @@ struct memory_range_t {
   const char* p_end;
 };
 
+struct container_range_t {
+  memory_range_t range;
+  extent_len_t node_size;
+};
+
 enum class ContainerType { ITERATIVE, INDEXABLE };
 
 // the input type to construct the value during insert.
diff --git a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/sub_items_stage.h b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/sub_items_stage.h
index 81f60f6e5b7..849cdea2611 100644
--- a/src/crimson/os/seastore/onode_manager/staged-fltree/stages/sub_items_stage.h
+++ b/src/crimson/os/seastore/onode_manager/staged-fltree/stages/sub_items_stage.h
@@ -42,7 +42,9 @@ class internal_sub_items_t {
  public:
   using num_keys_t = index_t;
 
-  internal_sub_items_t(const memory_range_t& range) {
+  internal_sub_items_t(const container_range_t& _range)
+    : node_size{_range.node_size} {
+    auto& range = _range.range;
     assert(range.p_start < range.p_end);
     assert((range.p_end - range.p_start) % sizeof(internal_sub_item_t) == 0);
     num_items = (range.p_end - range.p_start) / sizeof(internal_sub_item_t);
@@ -92,8 +94,9 @@ class internal_sub_items_t {
     ceph::decode(end_offset, delta);
     assert(start_offset < end_offset);
     assert(end_offset <= NODE_BLOCK_SIZE);
-    return internal_sub_items_t({p_node_start + start_offset,
-                                 p_node_start + end_offset});
+    return internal_sub_items_t({{p_node_start + start_offset,
+                                  p_node_start + end_offset},
+                                 node_size});
   }
 
   static node_offset_t header_size() { return 0u; }
@@ -119,6 +122,7 @@ class internal_sub_items_t {
   class Appender;
 
  private:
+  extent_len_t node_size;
   index_t num_items;
   const internal_sub_item_t* p_first_item;
 };
@@ -164,7 +168,9 @@ class leaf_sub_items_t {
   // should be enough to index all keys under 64 KiB node
   using num_keys_t = uint16_t;
 
-  leaf_sub_items_t(const memory_range_t& range) {
+  leaf_sub_items_t(const container_range_t& _range)
+    : node_size{_range.node_size} {
+    auto& range = _range.range;
     assert(range.p_start < range.p_end);
     auto _p_num_keys = range.p_end - sizeof(num_keys_t);
     assert(range.p_start < _p_num_keys);
@@ -261,8 +267,9 @@ class leaf_sub_items_t {
     ceph::decode(end_offset, delta);
     assert(start_offset < end_offset);
     assert(end_offset <= NODE_BLOCK_SIZE);
-    return leaf_sub_items_t({p_node_start + start_offset,
-                             p_node_start + end_offset});
+    return leaf_sub_items_t({{p_node_start + start_offset,
+                              p_node_start + end_offset},
+                             node_size});
   }
 
   static node_offset_t header_size() { return sizeof(num_keys_t); }
@@ -288,6 +295,7 @@ class leaf_sub_items_t {
   class Appender;
 
  private:
+  extent_len_t node_size;
   // TODO: support unaligned access
   const num_keys_t* p_num_keys;
   const node_offset_packed_t* p_offsets;
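
The core of the change is the container_range_t type added to stage_types.h above: it bundles the existing memory_range_t with the length of the owning node extent, so node_size is passed down explicitly to item_iterator_t and the sub-items stages instead of being assumed from a compile-time NODE_BLOCK_SIZE. Below is a small standalone C++ sketch of that calling convention; it is not part of the patch, and example_stage_t, the 4 KiB buffer, and the offsets are made-up stand-ins for the real fltree stages.

#include <cassert>
#include <cstdint>

using extent_len_t = uint32_t;  // stand-in for seastore's extent length type

struct memory_range_t {
  const char* p_start;
  const char* p_end;
};

// As added in stage_types.h: a memory range plus the size of the node
// extent that contains it.
struct container_range_t {
  memory_range_t range;
  extent_len_t node_size;
};

// Made-up stand-in for a lower stage such as item_iterator_t: it records
// node_size and unwraps the inner range, mirroring the constructor changes
// in item_iterator_stage.h and sub_items_stage.h.
class example_stage_t {
 public:
  explicit example_stage_t(const container_range_t& range)
      : node_size{range.node_size},
        p_start{range.range.p_start},
        p_end{range.range.p_end} {
    assert(p_start < p_end);
  }

  // Descending to the next stage hands the same node_size one level
  // further down, as the get_nxt_container() changes above do.
  container_range_t get_nxt_container(const char* nxt_end) const {
    assert(p_start < nxt_end && nxt_end <= p_end);
    return {{p_start, nxt_end}, node_size};
  }

 private:
  extent_len_t node_size;
  const char* p_start;
  const char* p_end;
};

int main() {
  char node[4096] = {};  // hypothetical 4 KiB node buffer; offsets are made up
  extent_len_t node_size = sizeof(node);
  container_range_t items{{node + 64, node + 1024}, node_size};
  example_stage_t stage(items);
  container_range_t nxt = stage.get_nxt_container(node + 512);
  assert(nxt.node_size == node_size);
  return 0;
}

The same pattern repeats in every hunk above: constructors take a container_range_t, remember node_size, and rebuild a container_range_t whenever they descend to the next stage.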