using value_input_t = value_input_type_t<NODE_TYPE>;
using value_t = value_type_t<NODE_TYPE>;
public:
- item_iterator_t(const memory_range_t& range)
- : p_items_start(range.p_start), p_items_end(range.p_end) {
+ item_iterator_t(const container_range_t& range)
+ : node_size{range.node_size},
+ p_items_start(range.range.p_start),
+ p_items_end(range.range.p_end) {
assert(p_items_start < p_items_end);
next_item_range(p_items_end);
}
node_offset_t size_overhead() const {
return sizeof(node_offset_t) + get_key().size_overhead();
}
- memory_range_t get_nxt_container() const {
- return {item_range.p_start, get_key().p_start()};
+ container_range_t get_nxt_container() const {
+ return {{item_range.p_start, get_key().p_start()}, node_size};
}
bool has_next() const {
assert(p_items_start <= item_range.p_start);
index_t index;
ceph::decode(index, delta);
- item_iterator_t ret({p_node_start + start_offset,
- p_node_start + end_offset});
+ item_iterator_t ret({{p_node_start + start_offset,
+ p_node_start + end_offset},
+ node_size});
while (index > 0) {
++ret;
--index;
item_range = {p_item_start, p_item_end};
}
+ extent_len_t node_size;
const char* p_items_start;
const char* p_items_end;
mutable memory_range_t item_range;
}
template <typename FieldType, node_type_t NODE_TYPE>
-memory_range_t NODE_T::get_nxt_container(index_t index) const
+container_range_t NODE_T::get_nxt_container(index_t index) const
{
if constexpr (std::is_same_v<FieldType, internal_fields_3_t>) {
ceph_abort("N3 internal node doesn't have the right part");
} else {
// range for item_iterator_t<NODE_TYPE>
}
- return {item_p_start, item_p_end};
+ return {{item_p_start, item_p_end}, node_size};
}
}
node_offset_t size_to_nxt_at(index_t index) const;
node_offset_t size_overhead_at(index_t index) const {
return FieldType::ITEM_OVERHEAD; }
- memory_range_t get_nxt_container(index_t index) const;
+ container_range_t get_nxt_container(index_t index) const;
template <typename T = FieldType>
std::enable_if_t<T::FIELD_TYPE == field_type_t::N3, const value_t*>
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
return p_fields->get_p_child_addr(index, node_size);
} else {
- auto range = get_nxt_container(index);
+ auto range = get_nxt_container(index).range;
auto ret = reinterpret_cast<const value_header_t*>(range.p_start);
assert(range.p_start + ret->allocation_size() == range.p_end);
return ret;
* size() -> node_offset_t
* size_overhead() -> node_offset_t
* (IS_BOTTOM) get_p_value() -> const value_t*
- * (!IS_BOTTOM) get_nxt_container() -> nxt_stage::container_t
+ * (!IS_BOTTOM) get_nxt_container() -> container_range_t
* (!IS_BOTTOM) size_to_nxt() -> node_offset_t
* seek:
* operator++() -> iterator_t&
const char* p_end;
};
+// A memory range tagged with the size of the node extent that contains it.
+// node_size is threaded through every container constructed from a node
+// (item_iterator_t, internal_sub_items_t, leaf_sub_items_t, ...), presumably
+// because node extents are no longer assumed to be a fixed NODE_BLOCK_SIZE
+// -- TODO(review): confirm; some asserts in this patch still check against
+// NODE_BLOCK_SIZE rather than node_size.
+struct container_range_t {
+  memory_range_t range;      // [p_start, p_end) within the node extent
+  extent_len_t node_size;    // total length of the owning node extent
+};
+
enum class ContainerType { ITERATIVE, INDEXABLE };
// the input type to construct the value during insert.
public:
using num_keys_t = index_t;
- internal_sub_items_t(const memory_range_t& range) {
+ internal_sub_items_t(const container_range_t& _range)
+ : node_size{_range.node_size} {
+ auto& range = _range.range;
assert(range.p_start < range.p_end);
assert((range.p_end - range.p_start) % sizeof(internal_sub_item_t) == 0);
num_items = (range.p_end - range.p_start) / sizeof(internal_sub_item_t);
ceph::decode(end_offset, delta);
assert(start_offset < end_offset);
assert(end_offset <= NODE_BLOCK_SIZE);
- return internal_sub_items_t({p_node_start + start_offset,
- p_node_start + end_offset});
+ return internal_sub_items_t({{p_node_start + start_offset,
+ p_node_start + end_offset},
+ node_size});
}
static node_offset_t header_size() { return 0u; }
class Appender;
private:
+ extent_len_t node_size;
index_t num_items;
const internal_sub_item_t* p_first_item;
};
// should be enough to index all keys under 64 KiB node
using num_keys_t = uint16_t;
- leaf_sub_items_t(const memory_range_t& range) {
+ leaf_sub_items_t(const container_range_t& _range)
+ : node_size{_range.node_size} {
+ auto& range = _range.range;
assert(range.p_start < range.p_end);
auto _p_num_keys = range.p_end - sizeof(num_keys_t);
assert(range.p_start < _p_num_keys);
ceph::decode(end_offset, delta);
assert(start_offset < end_offset);
assert(end_offset <= NODE_BLOCK_SIZE);
- return leaf_sub_items_t({p_node_start + start_offset,
- p_node_start + end_offset});
+ return leaf_sub_items_t({{p_node_start + start_offset,
+ p_node_start + end_offset},
+ node_size});
}
static node_offset_t header_size() { return sizeof(num_keys_t); }
class Appender;
private:
+ extent_len_t node_size;
// TODO: support unaligned access
const num_keys_t* p_num_keys;
const node_offset_packed_t* p_offsets;