using NodeImplURef = std::unique_ptr<NodeImpl>;
using level_t = uint8_t;
-constexpr auto INDEX_END = std::numeric_limits<size_t>::max();
-constexpr auto INDEX_LAST = INDEX_END - 0xf;
-constexpr auto INDEX_UPPER_BOUND = INDEX_END - 0xff;
-inline bool is_valid_index(size_t index) { return index < INDEX_UPPER_BOUND; }
+// a type used only to index within a node; 32 bits should be enough
+using index_t = uint32_t;
+constexpr auto INDEX_END = std::numeric_limits<index_t>::max();
+constexpr auto INDEX_LAST = INDEX_END - 0x4;
+constexpr auto INDEX_UPPER_BOUND = INDEX_END - 0x8;
+inline bool is_valid_index(index_t index) { return index < INDEX_UPPER_BOUND; }
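// Editor's illustration (not part of this change): INDEX_END and INDEX_LAST are
// special position markers; both sit in the reserved range above
// INDEX_UPPER_BOUND, so is_valid_index() rejects them while accepting every
// real in-node position.
static_assert(INDEX_UPPER_BOUND < INDEX_LAST);
static_assert(INDEX_LAST < INDEX_END);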
// TODO: decide based on NODE_BLOCK_SIZE
using node_offset_t = uint16_t;
template <node_type_t NODE_TYPE>
template <KeyT KT>
-bool APPEND_T::append(const ITER_T& src, size_t& items) {
+bool APPEND_T::append(const ITER_T& src, index_t& items) {
auto p_end = src.p_end();
bool append_till_end = false;
if (is_valid_index(items)) {
// container type system
using key_get_type = const ns_oid_view_t&;
static constexpr auto CONTAINER_TYPE = ContainerType::ITERATIVE;
- size_t index() const { return _index; }
+ index_t index() const { return _index; }
key_get_type get_key() const {
if (!key.has_value()) {
key = ns_oid_view_t(item_range.p_end);
mutable memory_range_t item_range;
mutable node_offset_t back_offset;
mutable std::optional<ns_oid_view_t> key;
- mutable size_t _index = 0u;
+ mutable index_t _index = 0u;
};
template <node_type_t NODE_TYPE>
public:
Appender(NodeExtentMutable* p_mut, char* p_append)
: p_mut{p_mut}, p_append{p_append} {}
- bool append(const item_iterator_t<NODE_TYPE>& src, size_t& items);
+ bool append(const item_iterator_t<NODE_TYPE>& src, index_t& items);
char* wrap() { return p_append; }
std::tuple<NodeExtentMutable*, char*> open_nxt(const key_get_type&);
std::tuple<NodeExtentMutable*, char*> open_nxt(const full_key_t<KT>&);
}
template <typename FieldType, node_type_t NODE_TYPE>
-node_offset_t NODE_T::size_to_nxt_at(size_t index) const {
+node_offset_t NODE_T::size_to_nxt_at(index_t index) const {
assert(index < keys());
if constexpr (FIELD_TYPE == field_type_t::N0 ||
FIELD_TYPE == field_type_t::N1) {
}
template <typename FieldType, node_type_t NODE_TYPE>
-memory_range_t NODE_T::get_nxt_container(size_t index) const {
+memory_range_t NODE_T::get_nxt_container(index_t index) const {
if constexpr (std::is_same_v<FieldType, internal_fields_3_t>) {
ceph_abort("N3 internal node doesn't have the right part");
} else {
template <KeyT KT>
memory_range_t NODE_T::insert_prefix_at(
NodeExtentMutable& mut, const node_extent_t& node, const full_key_t<KT>& key,
- size_t index, node_offset_t size, const char* p_left_bound) {
+ index_t index, node_offset_t size, const char* p_left_bound) {
if constexpr (FIELD_TYPE == field_type_t::N0 ||
FIELD_TYPE == field_type_t::N1) {
assert(index <= node.keys());
#define IPA_TEMPLATE(FT, NT, KT) \
template memory_range_t NODE_INST(FT, NT)::insert_prefix_at<KT>( \
NodeExtentMutable&, const node_extent_t&, const full_key_t<KT>&, \
- size_t, node_offset_t, const char*)
+ index_t, node_offset_t, const char*)
IPA_TEMPLATE(node_fields_0_t, node_type_t::INTERNAL, KeyT::VIEW);
IPA_TEMPLATE(node_fields_1_t, node_type_t::INTERNAL, KeyT::VIEW);
IPA_TEMPLATE(node_fields_2_t, node_type_t::INTERNAL, KeyT::VIEW);
template <typename FieldType, node_type_t NODE_TYPE>
void NODE_T::update_size_at(
- NodeExtentMutable& mut, const node_extent_t& node, size_t index, int change) {
+ NodeExtentMutable& mut, const node_extent_t& node, index_t index, int change) {
assert(index < node.keys());
FieldType::update_size_at(mut, node.fields(), index, change);
}
template <typename FieldType, node_type_t NODE_TYPE>
node_offset_t NODE_T::trim_until(
- NodeExtentMutable& mut, const node_extent_t& node, size_t index) {
+ NodeExtentMutable& mut, const node_extent_t& node, index_t index) {
assert(!node.is_level_tail());
auto keys = node.keys();
assert(index <= keys);
template <typename FieldType, node_type_t NODE_TYPE>
node_offset_t NODE_T::trim_at(
NodeExtentMutable& mut, const node_extent_t& node,
- size_t index, node_offset_t trimmed) {
+ index_t index, node_offset_t trimmed) {
assert(!node.is_level_tail());
assert(index < node.keys());
if constexpr (std::is_same_v<FieldType, internal_fields_3_t>) {
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
-void APPEND_T::append(const node_extent_t& src, size_t from, size_t items) {
+void APPEND_T::append(const node_extent_t& src, index_t from, index_t items) {
assert(from <= src.keys());
if (p_src == nullptr) {
p_src = &src;
// container type system
using key_get_type = typename FieldType::key_get_type;
static constexpr auto CONTAINER_TYPE = ContainerType::INDEXABLE;
- size_t keys() const { return p_fields->num_keys; }
- key_get_type operator[] (size_t index) const { return p_fields->get_key(index); }
- node_offset_t size_before(size_t index) const {
+ index_t keys() const { return p_fields->num_keys; }
+ key_get_type operator[] (index_t index) const { return p_fields->get_key(index); }
+ node_offset_t size_before(index_t index) const {
auto free_size = p_fields->template free_size_before<NODE_TYPE>(index);
assert(total_size() >= free_size);
return total_size() - free_size;
}
- node_offset_t size_to_nxt_at(size_t index) const;
- node_offset_t size_overhead_at(size_t index) const {
+ node_offset_t size_to_nxt_at(index_t index) const;
+ node_offset_t size_overhead_at(index_t index) const {
return FieldType::ITEM_OVERHEAD; }
- memory_range_t get_nxt_container(size_t index) const;
+ memory_range_t get_nxt_container(index_t index) const;
template <typename T = FieldType>
std::enable_if_t<T::FIELD_TYPE == field_type_t::N3, const value_t*>
- get_p_value(size_t index) const {
+ get_p_value(index_t index) const {
assert(index < keys());
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
return &p_fields->child_addrs[index];
static const value_t* insert_at(
NodeExtentMutable& mut, const node_extent_t&,
const full_key_t<KT>& key, const value_t& value,
- size_t index, node_offset_t size, const char* p_left_bound) {
+ index_t index, node_offset_t size, const char* p_left_bound) {
if constexpr (FIELD_TYPE == field_type_t::N3) {
ceph_abort("not implemented");
} else {
static memory_range_t insert_prefix_at(
NodeExtentMutable&, const node_extent_t&,
const full_key_t<KT>& key,
- size_t index, node_offset_t size, const char* p_left_bound);
+ index_t index, node_offset_t size, const char* p_left_bound);
static void update_size_at(
- NodeExtentMutable&, const node_extent_t&, size_t index, int change);
+ NodeExtentMutable&, const node_extent_t&, index_t index, int change);
static node_offset_t trim_until(
- NodeExtentMutable&, const node_extent_t&, size_t index);
+ NodeExtentMutable&, const node_extent_t&, index_t index);
static node_offset_t trim_at(NodeExtentMutable&, const node_extent_t&,
- size_t index, node_offset_t trimmed);
+ index_t index, node_offset_t trimmed);
template <KeyT KT>
class Appender;
p_append_left = p_start + FieldType::HEADER_SIZE;
p_append_right = p_start + FieldType::SIZE;
}
- void append(const node_extent_t& src, size_t from, size_t items);
+ void append(const node_extent_t& src, index_t from, index_t items);
void append(const full_key_t<KT>&, const value_t&, const value_t*&);
char* wrap();
std::tuple<NodeExtentMutable*, char*> open_nxt(const key_get_type&);
template <typename SlotType>
void F013_T::update_size_at(
- NodeExtentMutable& mut, const me_t& node, size_t index, int change) {
+ NodeExtentMutable& mut, const me_t& node, index_t index, int change) {
assert(index <= node.num_keys);
for (const auto* p_slot = &node.slots[index];
p_slot < &node.slots[node.num_keys];
template <KeyT KT>
void F013_T::insert_at(
NodeExtentMutable& mut, const full_key_t<KT>& key,
- const me_t& node, size_t index, node_offset_t size_right) {
+ const me_t& node, index_t index, node_offset_t size_right) {
assert(index <= node.num_keys);
update_size_at(mut, node, index, size_right);
auto p_insert = const_cast<char*>(fields_start(node)) +
}
#define IA_TEMPLATE(ST, KT) template void F013_INST(ST):: \
insert_at<KT>(NodeExtentMutable&, const full_key_t<KT>&, \
- const F013_INST(ST)&, size_t, node_offset_t)
+ const F013_INST(ST)&, index_t, node_offset_t)
IA_TEMPLATE(slot_0_t, KeyT::VIEW);
IA_TEMPLATE(slot_1_t, KeyT::VIEW);
IA_TEMPLATE(slot_3_t, KeyT::VIEW);
template <node_type_t NODE_TYPE, typename FieldType>
node_range_t fields_free_range_before(
- const FieldType& node, size_t index) {
+ const FieldType& node, index_t index) {
assert(index <= node.num_keys);
node_offset_t offset_start = node.get_key_start_offset(index);
node_offset_t offset_end =
bool is_level_tail() const { return header.get_is_level_tail(); }
node_offset_t total_size() const { return SIZE; }
- key_get_type get_key(size_t index) const {
+ key_get_type get_key(index_t index) const {
assert(index < num_keys);
return slots[index].key;
}
- node_offset_t get_key_start_offset(size_t index) const {
+ node_offset_t get_key_start_offset(index_t index) const {
assert(index <= num_keys);
auto offset = HEADER_SIZE + sizeof(SlotType) * index;
assert(offset < SIZE);
return offset;
}
- node_offset_t get_item_start_offset(size_t index) const {
+ node_offset_t get_item_start_offset(index_t index) const {
assert(index < num_keys);
auto offset = slots[index].right_offset;
assert(offset <= SIZE);
return offset;
}
- const void* p_offset(size_t index) const {
+ const void* p_offset(index_t index) const {
assert(index < num_keys);
return &slots[index].right_offset;
}
- node_offset_t get_item_end_offset(size_t index) const {
+ node_offset_t get_item_end_offset(index_t index) const {
return index == 0 ? SIZE : get_item_start_offset(index - 1);
}
template <node_type_t NODE_TYPE>
- node_offset_t free_size_before(size_t index) const {
+ node_offset_t free_size_before(index_t index) const {
auto range = fields_free_range_before<NODE_TYPE>(*this, index);
return range.end - range.start;
}
template <KeyT KT>
static void insert_at(
NodeExtentMutable&, const full_key_t<KT>& key,
- const me_t& node, size_t index, node_offset_t size_right);
+ const me_t& node, index_t index, node_offset_t size_right);
static void update_size_at(
- NodeExtentMutable&, const me_t& node, size_t index, int change);
+ NodeExtentMutable&, const me_t& node, index_t index, int change);
static void append_key(
NodeExtentMutable&, const key_t& key, char*& p_append);
template <KeyT KT>
bool is_level_tail() const { return header.get_is_level_tail(); }
node_offset_t total_size() const { return SIZE; }
- key_get_type get_key(size_t index) const {
+ key_get_type get_key(index_t index) const {
assert(index < num_keys);
node_offset_t item_end_offset =
(index == 0 ? SIZE : offsets[index - 1]);
const char* p_start = fields_start(*this);
return key_t(p_start + item_end_offset);
}
- node_offset_t get_key_start_offset(size_t index) const {
+ node_offset_t get_key_start_offset(index_t index) const {
assert(index <= num_keys);
auto offset = HEADER_SIZE + sizeof(node_offset_t) * num_keys;
assert(offset <= SIZE);
return offset;
}
- node_offset_t get_item_start_offset(size_t index) const {
+ node_offset_t get_item_start_offset(index_t index) const {
assert(index < num_keys);
auto offset = offsets[index];
assert(offset <= SIZE);
return offset;
}
- const void* p_offset(size_t index) const {
+ const void* p_offset(index_t index) const {
assert(index < num_keys);
return &offsets[index];
}
- node_offset_t get_item_end_offset(size_t index) const {
+ node_offset_t get_item_end_offset(index_t index) const {
return index == 0 ? SIZE : get_item_start_offset(index - 1);
}
template <node_type_t NODE_TYPE>
- node_offset_t free_size_before(size_t index) const {
+ node_offset_t free_size_before(index_t index) const {
auto range = fields_free_range_before<NODE_TYPE>(*this, index);
return range.end - range.start;
}
template <KeyT KT>
static void insert_at(
NodeExtentMutable& mut, const full_key_t<KT>& key,
- const node_fields_2_t& node, size_t index, node_offset_t size_right) {
+ const node_fields_2_t& node, index_t index, node_offset_t size_right) {
ceph_abort("not implemented");
}
static void update_size_at(
- NodeExtentMutable& mut, const node_fields_2_t& node, size_t index, int change) {
+ NodeExtentMutable& mut, const node_fields_2_t& node, index_t index, int change) {
ceph_abort("not implemented");
}
static void append_key(
return SIZE;
}
}
- key_get_type get_key(size_t index) const {
+ key_get_type get_key(index_t index) const {
assert(index < num_keys);
return keys[index];
}
template <node_type_t NODE_TYPE>
std::enable_if_t<NODE_TYPE == node_type_t::INTERNAL, node_offset_t>
- free_size_before(size_t index) const {
+ free_size_before(index_t index) const {
assert(index <= num_keys);
assert(num_keys <= (is_level_tail() ? MAX_NUM_KEYS - 1 : MAX_NUM_KEYS));
auto free = (MAX_NUM_KEYS - index) * (sizeof(snap_gen_t) + sizeof(laddr_t));
template <KeyT KT>
static void insert_at(
NodeExtentMutable& mut, const full_key_t<KT>& key,
- const me_t& node, size_t index, node_offset_t size_right) {
+ const me_t& node, index_t index, node_offset_t size_right) {
ceph_abort("not implemented");
}
static void update_size_at(
- NodeExtentMutable& mut, const me_t& node, size_t index, int change) {
+ NodeExtentMutable& mut, const me_t& node, index_t index, int change) {
ceph_abort("not implemented");
}
namespace crimson::os::seastore::onode {
struct search_result_bs_t {
- size_t index;
+ index_t index;
MatchKindBS match;
};
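// Editor's note (illustrative, not from this change): binary_search() below
// returns the index where the key matches (MatchKindBS::EQ) or where it would
// need to be inserted (MatchKindBS::NE); a typical call hands it an accessor
// into an indexable container, e.g.
//   auto ret = binary_search(key, 0, container.keys(),
//                            [&] (index_t i) { return container[i]; });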
template <typename FGetKey>
search_result_bs_t binary_search(
const full_key_t<KeyT::HOBJ>& key,
- size_t begin, size_t end, FGetKey&& f_get_key) {
+ index_t begin, index_t end, FGetKey&& f_get_key) {
assert(begin <= end);
while (begin < end) {
auto total = begin + end;
template <typename PivotType, typename FGet>
search_result_bs_t binary_search_r(
- size_t rend, size_t rbegin, FGet&& f_get, const PivotType& key) {
+ index_t rend, index_t rbegin, FGet&& f_get, const PivotType& key) {
assert(rend <= rbegin);
while (rend < rbegin) {
auto total = rend + rbegin + 1;
static constexpr auto STAGE = Params::STAGE;
template <bool is_exclusive>
- static void _left_or_right(size_t& split_index, size_t insert_index,
+ static void _left_or_right(index_t& split_index, index_t insert_index,
std::optional<bool>& is_insert_left) {
assert(!is_insert_left.has_value());
assert(is_valid_index(split_index));
/*
* indexable container type system:
* CONTAINER_TYPE = ContainerType::INDEXABLE
- * keys() const -> size_t
- * operator[](size_t) const -> key_get_type
- * size_before(size_t) const -> node_offset_t
- * size_overhead_at(size_t) const -> node_offset_t
- * (IS_BOTTOM) get_p_value(size_t) const -> const value_t*
- * (!IS_BOTTOM) size_to_nxt_at(size_t) const -> node_offset_t
- * (!IS_BOTTOM) get_nxt_container(size_t) const
+ * keys() const -> index_t
+ * operator[](index_t) const -> key_get_type
+ * size_before(index_t) const -> node_offset_t
+ * size_overhead_at(index_t) const -> node_offset_t
+ * (IS_BOTTOM) get_p_value(index_t) const -> const value_t*
+ * (!IS_BOTTOM) size_to_nxt_at(index_t) const -> node_offset_t
+ * (!IS_BOTTOM) get_nxt_container(index_t) const
* static:
* header_size() -> node_offset_t
* estimate_insert(key, value) -> node_offset_t
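*
* An editor's sketch (illustrative, not part of this change) of how a caller
* walks such a container:
*   for (index_t i = 0; i < container.keys(); ++i) {
*     key_get_type key = container[i];               // key at position i
*     node_offset_t used = container.size_before(i); // bytes used before i
*   }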
assert(container.keys());
}
- size_t index() const {
+ index_t index() const {
return _index;
}
key_get_type get_key() const {
++_index;
return *this;
}
- void seek_at(size_t index) {
+ void seek_at(index_t index) {
assert(index < container.keys());
seek_till_end(index);
}
- void seek_till_end(size_t index) {
+ void seek_till_end(index_t index) {
assert(!is_end());
assert(this->index() == 0);
assert(index <= container.keys());
MatchKindBS seek(const full_key_t<KeyT::HOBJ>& key, bool exclude_last) {
assert(!is_end());
assert(index() == 0);
- size_t end_index = container.keys();
+ index_t end_index = container.keys();
if (exclude_last) {
assert(end_index);
--end_index;
assert(compare_to<KeyT::HOBJ>(key, container[end_index]) == MatchKindCMP::LT);
}
auto ret = binary_search(key, _index, end_index,
- [this] (size_t index) { return container[index]; });
+ [this] (index_t index) { return container[index]; });
_index = ret.index;
return ret.match;
}
template <bool is_exclusive>
size_t seek_split_inserted(
size_t start_size, size_t extra_size, size_t target_size,
- size_t& insert_index, size_t insert_size,
+ index_t& insert_index, size_t insert_size,
std::optional<bool>& is_insert_left) {
assert(!is_end());
assert(index() == 0);
auto start_size_1 = start_size + extra_size;
auto f_get_used_size = [this, start_size, start_size_1,
- insert_index, insert_size] (size_t index) {
+ insert_index, insert_size] (index_t index) {
size_t current_size;
if (unlikely(index == 0)) {
current_size = start_size;
}
return current_size;
};
- size_t s_end;
+ index_t s_end;
if constexpr (is_exclusive) {
s_end = container.keys();
} else {
assert(!is_end());
assert(index() == 0);
auto start_size_1 = start_size + extra_size;
- auto f_get_used_size = [this, start_size, start_size_1] (size_t index) {
+ auto f_get_used_size = [this, start_size, start_size_1] (index_t index) {
size_t current_size;
if (unlikely(index == 0)) {
current_size = start_size;
// Note: possible to return an end iterator if to_index == INDEX_END
template <KeyT KT>
void copy_out_until(
- typename container_t::template Appender<KT>& appender, size_t& to_index) {
+ typename container_t::template Appender<KT>& appender, index_t& to_index) {
auto num_keys = container.keys();
- size_t items;
+ index_t items;
if (to_index == INDEX_END) {
items = num_keys - _index;
appender.append(container, _index, items);
private:
container_t container;
- size_t _index = 0;
+ index_t _index = 0;
};
template <ContainerType CTYPE>
/*
* iterative container type system (!IS_BOTTOM):
* CONTAINER_TYPE = ContainerType::ITERATIVE
- * index() const -> size_t
+ * index() const -> index_t
* get_key() const -> key_get_type
* size() const -> node_offset_t
* size_to_nxt() const -> node_offset_t
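*
* Editor's sketch (illustrative, not part of this change): an iterative
* container is advanced in place rather than indexed, e.g.
*   key_get_type key = container.get_key();  // current item's key
*   if (container.has_next()) {
*     ++container;                            // step to the next item
*   }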
assert(index() == 0);
}
- size_t index() const {
+ index_t index() const {
if (is_end()) {
return end_index;
} else {
++container;
return *this;
}
- void seek_at(size_t index) {
+ void seek_at(index_t index) {
assert(!is_end());
assert(this->index() == 0);
while (index > 0) {
--index;
}
}
- void seek_till_end(size_t index) {
+ void seek_till_end(index_t index) {
assert(!is_end());
assert(this->index() == 0);
while (index > 0) {
template <bool is_exclusive>
size_t seek_split_inserted(
size_t start_size, size_t extra_size, size_t target_size,
- size_t& insert_index, size_t insert_size,
+ index_t& insert_index, size_t insert_size,
std::optional<bool>& is_insert_left) {
assert(!is_end());
assert(index() == 0);
size_t current_size = start_size;
- size_t split_index = 0;
+ index_t split_index = 0;
extra_size += header_size();
do {
if constexpr (!is_exclusive) {
// Note: possible to return an end iterator if to_index == INDEX_END
template <KeyT KT>
void copy_out_until(
- typename container_t::template Appender<KT>& appender, size_t& to_index) {
+ typename container_t::template Appender<KT>& appender, index_t& to_index) {
if (is_end()) {
assert(!container.has_next());
if (to_index == INDEX_END) {
assert(to_index == index());
return;
}
- size_t items;
+ index_t items;
if (to_index == INDEX_END || to_index == INDEX_LAST) {
items = to_index;
} else {
private:
container_t container;
bool _is_end = false;
- size_t end_index;
+ index_t end_index;
};
/*
* from a *non-empty* container.
* cstr(const container_t&)
* access:
- * index() -> size_t
+ * index() -> index_t
* get_key() -> key_get_type (const reference or value type)
* is_last() -> bool
* is_end() -> bool
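*
* Editor's sketch (illustrative; iterator_t is the assumed wrapper name):
*   iterator_t iter(container);   // starts at index 0
*   iter.seek_at(2);              // jump to the item at index 2
*   auto key = iter.get_key();    // key of the current item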
public:
StagedIterator() = default;
bool valid() const { return iter.has_value(); }
- size_t index() const {
+ index_t index() const {
return iter->index();
}
bool is_end() const { return iter->is_end(); }
/*
* container appender type system
* container_t::Appender(NodeExtentMutable& mut, char* p_append)
- * append(const container_t& src, size_t from, size_t items)
+ * append(const container_t& src, index_t from, index_t items)
* wrap() -> char*
* IF !IS_BOTTOM:
* open_nxt(const key_get_type&)
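*
* Editor's sketch (illustrative, not part of this change): a bulk copy through
* an appender roughly looks like
*   appender.append(src_container, from, items);  // bulk-copy [from, from + items)
*   char* p_end = appender.wrap();                // end of the appended space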
assert(!valid());
}
bool valid() const { return appender.has_value(); }
- size_t index() const {
+ index_t index() const {
assert(valid());
return _index;
}
_index = 0;
}
// Note: may advance src_iter to its end if to_index == INDEX_END
- void append_until(StagedIterator& src_iter, size_t& to_index) {
+ void append_until(StagedIterator& src_iter, index_t& to_index) {
assert(!require_wrap_nxt);
auto s_index = src_iter.index();
src_iter.get().template copy_out_until<KT>(*appender, to_index);
}
private:
std::optional<typename container_t::template Appender<KT>> appender;
- size_t _index;
+ index_t _index;
bool require_wrap_nxt = false;
};
template <KeyT KT>
static void _append_range(
- StagedIterator& src_iter, StagedAppender<KT>& appender, size_t& to_index) {
+ StagedIterator& src_iter, StagedAppender<KT>& appender, index_t& to_index) {
if (src_iter.is_end()) {
// append done
assert(to_index == INDEX_END);
if (appender.in_progress()) {
// appender has appended something at the current item,
// cannot append the current item as a whole
- size_t to_index_nxt = INDEX_END;
+ index_t to_index_nxt = INDEX_END;
NXT_STAGE_T::template _append_range<KT>(
src_iter.nxt(), appender.get_nxt(), to_index_nxt);
++src_iter;
} else if (src_iter.in_progress()) {
// src_iter is not at the beginning of the current item,
// cannot append the current item as a whole
- size_t to_index_nxt = INDEX_END;
+ index_t to_index_nxt = INDEX_END;
NXT_STAGE_T::template _append_range<KT>(
src_iter.nxt(), appender.open_nxt(src_iter.get_key()), to_index_nxt);
++src_iter;
template <KeyT KT>
static void append_until(StagedIterator& src_iter, StagedAppender<KT>& appender,
position_t& position, match_stage_t stage) {
- size_t from_index = src_iter.index();
- size_t& to_index = position.index;
+ index_t from_index = src_iter.index();
+ index_t& to_index = position.index;
assert(from_index <= to_index);
if constexpr (IS_BOTTOM) {
assert(stage == STAGE);
return false;
}
}
- size_t& index_by_stage(match_stage_t stage) {
+ index_t& index_by_stage(match_stage_t stage) {
assert(stage <= STAGE);
if (STAGE == stage) {
return index;
return {INDEX_END, nxt_t::end()};
}
- size_t index;
+ index_t index;
nxt_t nxt;
};
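// Editor's note (illustrative, not from this change): a position is a chain of
// per-stage indices; e.g. a three-stage position {2, {0, {1}}} means index 2 at
// this stage, index 0 at the next stage, and index 1 at the bottom stage.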
template <match_stage_t STAGE>
return false;
}
}
- size_t& index_by_stage(match_stage_t stage) {
+ index_t& index_by_stage(match_stage_t stage) {
assert(stage == STAGE_BOTTOM);
return index;
}
static me_t begin() { return {0u}; }
static me_t end() { return {INDEX_END}; }
- size_t index;
+ index_t index;
};
template <>
inline std::ostream& operator<<(std::ostream& os, const staged_position_t<STAGE_BOTTOM>& pos) {
}
template <typename T = me_t>
static std::enable_if_t<STAGE != STAGE_BOTTOM, T> from_nxt(
- size_t index, const staged_result_t<NODE_TYPE, STAGE - 1>& nxt_stage_result) {
+ index_t index, const staged_result_t<NODE_TYPE, STAGE - 1>& nxt_stage_result) {
return {{index, nxt_stage_result.position},
nxt_stage_result.p_value,
nxt_stage_result.mstat};
const laddr_packed_t* internal_sub_items_t::insert_at(
NodeExtentMutable& mut, const internal_sub_items_t& sub_items,
const full_key_t<KT>& key, const laddr_packed_t& value,
- size_t index, node_offset_t size, const char* p_left_bound) {
+ index_t index, node_offset_t size, const char* p_left_bound) {
assert(index <= sub_items.keys());
assert(size == estimate_insert<KT>(key, value));
const char* p_shift_start = p_left_bound;
}
template const laddr_packed_t* internal_sub_items_t::insert_at<KeyT::VIEW>(
NodeExtentMutable&, const internal_sub_items_t&, const full_key_t<KeyT::VIEW>&,
- const laddr_packed_t&, size_t, node_offset_t, const char*);
+ const laddr_packed_t&, index_t, node_offset_t, const char*);
node_offset_t internal_sub_items_t::trim_until(
- NodeExtentMutable&, internal_sub_items_t& items, size_t index) {
+ NodeExtentMutable&, internal_sub_items_t& items, index_t index) {
assert(index != 0);
auto keys = items.keys();
assert(index <= keys);
template <KeyT KT>
void internal_sub_items_t::Appender<KT>::append(
- const internal_sub_items_t& src, size_t from, size_t items) {
+ const internal_sub_items_t& src, index_t from, index_t items) {
assert(from <= src.keys());
if (items == 0) {
return;
const onode_t* leaf_sub_items_t::insert_at(
NodeExtentMutable& mut, const leaf_sub_items_t& sub_items,
const full_key_t<KT>& key, const onode_t& value,
- size_t index, node_offset_t size, const char* p_left_bound) {
+ index_t index, node_offset_t size, const char* p_left_bound) {
assert(index <= sub_items.keys());
assert(size == estimate_insert<KT>(key, value));
// a. [... item(index)] << size
}
template const onode_t* leaf_sub_items_t::insert_at<KeyT::HOBJ>(
NodeExtentMutable&, const leaf_sub_items_t&, const full_key_t<KeyT::HOBJ>&,
- const onode_t&, size_t, node_offset_t, const char*);
+ const onode_t&, index_t, node_offset_t, const char*);
node_offset_t leaf_sub_items_t::trim_until(
- NodeExtentMutable& mut, leaf_sub_items_t& items, size_t index) {
+ NodeExtentMutable& mut, leaf_sub_items_t& items, index_t index) {
assert(index != 0);
auto keys = items.keys();
assert(index <= keys);
if (index == keys) {
return 0;
}
- size_t trim_items = keys - index;
+ index_t trim_items = keys - index;
const char* p_items_start = items.p_start();
const char* p_shift_start = items.get_item_end(index);
const char* p_shift_end = items.get_item_end(0);
*/
class internal_sub_items_t {
public:
- using num_keys_t = size_t;
+ using num_keys_t = index_t;
internal_sub_items_t(const memory_range_t& range) {
assert(range.p_start < range.p_end);
using key_get_type = const snap_gen_t&;
static constexpr auto CONTAINER_TYPE = ContainerType::INDEXABLE;
num_keys_t keys() const { return num_items; }
- key_get_type operator[](size_t index) const {
+ key_get_type operator[](index_t index) const {
assert(index < num_items);
return (p_first_item - index)->get_key();
}
- node_offset_t size_before(size_t index) const {
+ node_offset_t size_before(index_t index) const {
size_t ret = index * sizeof(internal_sub_item_t);
assert(ret < NODE_BLOCK_SIZE);
return ret;
}
- const laddr_packed_t* get_p_value(size_t index) const {
+ const laddr_packed_t* get_p_value(index_t index) const {
assert(index < num_items);
return (p_first_item - index)->get_p_value();
}
- node_offset_t size_overhead_at(size_t index) const { return 0u; }
+ node_offset_t size_overhead_at(index_t index) const { return 0u; }
static node_offset_t header_size() { return 0u; }
static const laddr_packed_t* insert_at(
NodeExtentMutable&, const internal_sub_items_t&,
const full_key_t<KT>&, const laddr_packed_t&,
- size_t index, node_offset_t size, const char* p_left_bound);
+ index_t index, node_offset_t size, const char* p_left_bound);
- static node_offset_t trim_until(NodeExtentMutable&, internal_sub_items_t&, size_t);
+ static node_offset_t trim_until(NodeExtentMutable&, internal_sub_items_t&, index_t);
template <KeyT KT>
class Appender;
private:
- size_t num_items;
+ index_t num_items;
const internal_sub_item_t* p_first_item;
};
public:
Appender(NodeExtentMutable* p_mut, char* p_append)
: p_mut{p_mut}, p_append{p_append} {}
- void append(const internal_sub_items_t& src, size_t from, size_t items);
+ void append(const internal_sub_items_t& src, index_t from, index_t items);
void append(const full_key_t<KT>&, const laddr_packed_t&, const laddr_packed_t*&);
char* wrap() { return p_append; }
private:
const char* p_start() const { return get_item_end(keys()); }
- const node_offset_packed_t& get_offset(size_t index) const {
+ const node_offset_packed_t& get_offset(index_t index) const {
assert(index < keys());
return *(p_offsets - index);
}
- const node_offset_t get_offset_to_end(size_t index) const {
+ const node_offset_t get_offset_to_end(index_t index) const {
assert(index <= keys());
return index == 0 ? 0 : get_offset(index - 1).value;
}
- const char* get_item_start(size_t index) const {
+ const char* get_item_start(index_t index) const {
return p_items_end - get_offset(index).value;
}
- const char* get_item_end(size_t index) const {
+ const char* get_item_end(index_t index) const {
return p_items_end - get_offset_to_end(index);
}
using key_get_type = const snap_gen_t&;
static constexpr auto CONTAINER_TYPE = ContainerType::INDEXABLE;
num_keys_t keys() const { return *p_num_keys; }
- key_get_type operator[](size_t index) const {
+ key_get_type operator[](index_t index) const {
assert(index < keys());
auto pointer = get_item_end(index);
assert(get_item_start(index) < pointer);
assert(get_item_start(index) < pointer);
return *reinterpret_cast<const snap_gen_t*>(pointer);
}
- node_offset_t size_before(size_t index) const {
+ node_offset_t size_before(index_t index) const {
assert(index <= keys());
size_t ret;
if (index == 0) {
assert(ret < NODE_BLOCK_SIZE);
return ret;
}
- node_offset_t size_overhead_at(size_t index) const { return sizeof(node_offset_t); }
- const onode_t* get_p_value(size_t index) const {
+ node_offset_t size_overhead_at(index_t index) const { return sizeof(node_offset_t); }
+ const onode_t* get_p_value(index_t index) const {
assert(index < keys());
auto pointer = get_item_start(index);
auto value = reinterpret_cast<const onode_t*>(pointer);
static const onode_t* insert_at(
NodeExtentMutable&, const leaf_sub_items_t&,
const full_key_t<KT>&, const onode_t&,
- size_t index, node_offset_t size, const char* p_left_bound);
+ index_t index, node_offset_t size, const char* p_left_bound);
- static node_offset_t trim_until(NodeExtentMutable&, leaf_sub_items_t&, size_t index);
+ static node_offset_t trim_until(NodeExtentMutable&, leaf_sub_items_t&, index_t index);
template <KeyT KT>
class Appender;
const char* p_items_end;
};
-auto constexpr APPENDER_LIMIT = 3u;
+constexpr index_t APPENDER_LIMIT = 3u;
template <KeyT KT>
class leaf_sub_items_t::Appender {
struct range_items_t {
- size_t from;
- size_t items;
+ index_t from;
+ index_t items;
};
struct kv_item_t {
const full_key_t<KT>* p_key;
: p_mut{p_mut}, p_append{p_append} {
}
- void append(const leaf_sub_items_t& src, size_t from, size_t items) {
+ void append(const leaf_sub_items_t& src, index_t from, index_t items) {
assert(cnt <= APPENDER_LIMIT);
assert(from <= src.keys());
if (items == 0) {
NodeExtentMutable* p_mut;
char* p_append;
var_t appends[APPENDER_LIMIT];
- size_t cnt = 0;
+ index_t cnt = 0;
};
template <node_type_t> struct _sub_items_t;