if (node_stage.keys()) {
STAGE_T::dump(node_stage, os, " ", size, p_start);
} else {
- if constexpr (NODE_TYPE == node_type_t::LEAF) {
- return os << " empty!";
- } else { // internal node
- if (!node_stage.is_level_tail()) {
- return os << " empty!";
- } else {
- size += node_stage_t::header_size();
- }
+ size += node_stage_t::header_size();
+ if (NODE_TYPE == node_type_t::LEAF || !node_stage.is_level_tail()) {
+ os << " empty!";
}
}
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
<< " @" << offset << "B";
}
}
+ assert(size == filled_size());
return os;
}
<< "+" << node_stage_t::EXTENT_SIZE << std::dec
<< (node_stage.is_level_tail() ? "$" : "")
<< "(level=" << (unsigned)node_stage.level()
- << ", filled=" << node_stage.total_size() - node_stage.free_size() << "B"
+ << ", filled=" << filled_size() << "B"
<< ", free=" << node_stage.free_size() << "B"
<< ")";
return os;
auto& node_stage = extent.read();
typename STAGE_T::StagedIterator split_at;
bool is_insert_left;
+ size_t split_size = 0;
{
size_t empty_size = node_stage.size_before(0);
- size_t total_size = node_stage.total_size();
- size_t available_size = total_size - empty_size;
- size_t filled_size = total_size - node_stage.free_size() - empty_size;
+ size_t filled_kv_size = filled_size() - empty_size;
/** NODE_BLOCK_SIZE considerations
*
* Generally,
*
* (TODO) Implement smarter logics to check when "double split" happens.
*/
- size_t target_split_size = empty_size + (filled_size + insert_size) / 2;
- assert(insert_size < available_size / 2);
+ size_t target_split_size = empty_size + (filled_kv_size + insert_size) / 2;
+ assert(insert_size < (node_stage.total_size() - empty_size) / 2);
- size_t split_size = 0;
std::optional<bool> _is_insert_left;
split_at.set(node_stage);
bool locate_nxt = STAGE_T::recursively_locate_split_inserted(
<< "), is_insert_left=" << is_insert_left
<< ", estimated_split_size=" << split_size
<< "(target=" << target_split_size
- << ", current=" << node_stage.size_before(node_stage.keys())
+ << ", current=" << filled_size()
<< ")" << std::endl;
// split_size can be larger than target_split_size in strategy B
// assert(split_size <= target_split_size);
<< "), insert_stage=" << (int)insert_stage
<< ", insert_size=" << insert_size
<< std::endl << std::endl;
+ assert(split_size == filled_size());
return {split_pos, is_insert_left, p_value};
}
NodeLayoutT(typename extent_t::state_t state, NodeExtentRef extent)
: extent{state, extent} {}
+ // New helper: bytes of the node actually in use (header + keys/values),
+ // i.e. everything before the free space. The assert cross-checks the
+ // stage-based accounting against the fields-based one
+ // (total_size - free_size); the two must always agree.
+ node_offset_t filled_size() const {
+ auto& node_stage = extent.read();
+ auto ret = node_stage.size_before(node_stage.keys());
+ assert(ret == node_stage.total_size() - node_stage.free_size());
+ return ret;
+ }
+
extent_t extent;
};
+ // Apply a signed size delta to this item's back-offset and write the
+ // updated offset back into the extent in place.
void ITER_T::update_size(
NodeExtentMutable& mut, const ITER_T& iter, int change) {
node_offset_t offset = iter.get_back_offset();
- assert(change + offset > 0);
- assert(change + offset < NODE_BLOCK_SIZE);
+ // Compute once in a signed int so a negative `change` that underflows
+ // the offset is caught by the assert before the narrowing cast below.
+ int new_size = change + offset;
+ assert(new_size > 0 && new_size < NODE_BLOCK_SIZE);
mut.copy_in_absolute(
- (void*)iter.get_item_range().p_end, node_offset_t(offset + change));
+ (void*)iter.get_item_range().p_end, node_offset_t(new_size));
}
template <node_type_t NODE_TYPE>
+ // Return type narrowed to node_offset_t: the trimmed byte count can
+ // never exceed the node block size, which the new assert enforces.
-size_t ITER_T::trim_until(NodeExtentMutable&, const ITER_T& iter) {
+node_offset_t ITER_T::trim_until(NodeExtentMutable&, const ITER_T& iter) {
assert(iter.index() != 0);
- return iter.p_end() - iter.p_items_start;
+ size_t ret = iter.p_end() - iter.p_items_start;
+ assert(ret < NODE_BLOCK_SIZE);
+ return ret;
}
template <node_type_t NODE_TYPE>
-size_t ITER_T::trim_at(
- NodeExtentMutable& mut, const ITER_T& iter, size_t trimmed) {
+node_offset_t ITER_T::trim_at(
+ NodeExtentMutable& mut, const ITER_T& iter, node_offset_t trimmed) {
+ // trim_size computed in size_t, then range-checked against the block
+ // size before it is (presumably) returned as node_offset_t.
+ // NOTE(review): the `return trim_size;` line is not visible in this
+ // hunk — confirm it survives the patch, the diff elides context here.
size_t trim_size = iter.p_start() - iter.p_items_start + trimmed;
+ assert(trim_size < NODE_BLOCK_SIZE);
assert(iter.get_back_offset() > trimmed);
node_offset_t new_offset = iter.get_back_offset() - trimmed;
mut.copy_in_absolute((void*)iter.item_range.p_end, new_offset);
}
return *key;
}
+ // Total on-disk size of the current item, including its back-offset
+ // slot; narrowed to node_offset_t with a range assert.
- size_t size() const {
- return item_range.p_end - item_range.p_start + sizeof(node_offset_t);
+ node_offset_t size() const {
+ size_t ret = item_range.p_end - item_range.p_start + sizeof(node_offset_t);
+ assert(ret < NODE_BLOCK_SIZE);
+ return ret;
};
+ // Size consumed by this stage before descending to the next-stage
+ // container: the key plus the back-offset slot.
- size_t size_to_nxt() const {
- return get_key().size() + sizeof(node_offset_t);
+ node_offset_t size_to_nxt() const {
+ size_t ret = get_key().size() + sizeof(node_offset_t);
+ assert(ret < NODE_BLOCK_SIZE);
+ return ret;
}
memory_range_t get_nxt_container() const {
return {item_range.p_start, get_key().p_start()};
static void update_size(
NodeExtentMutable& mut, const item_iterator_t<NODE_TYPE>& iter, int change);
- static size_t trim_until(NodeExtentMutable&, const item_iterator_t<NODE_TYPE>&);
- static size_t trim_at(
- NodeExtentMutable&, const item_iterator_t<NODE_TYPE>&, size_t trimmed);
+ static node_offset_t trim_until(NodeExtentMutable&, const item_iterator_t<NODE_TYPE>&);
+ static node_offset_t trim_at(
+ NodeExtentMutable&, const item_iterator_t<NODE_TYPE>&, node_offset_t trimmed);
template <KeyT KT>
class Appender;
return p_length + sizeof(string_size_t);
}
}
+ // On-disk size of the string key: payload length plus its stored
+ // string_size_t length field; asserted to fit in the node block.
- size_t size() const { return length + sizeof(string_size_t); }
+ node_offset_t size() const {
+ size_t ret = length + sizeof(string_size_t);
+ assert(ret < NODE_BLOCK_SIZE);
+ return ret;
+ }
std::string_view to_string_view() const {
assert(type() == Type::STR);
return {p_key, length};
ns_oid_view_t(const char* p_end) : nspace(p_end), oid(nspace.p_next_end()) {}
Type type() const { return oid.type(); }
const char* p_start() const { return oid.p_start(); }
+ // Combined size of namespace + oid views when both are full strings;
+ // otherwise (MIN/MAX marker types) only a single size field is stored.
- size_t size() const {
+ node_offset_t size() const {
if (type() == Type::STR) {
- return nspace.size() + oid.size();
+ size_t ret = nspace.size() + oid.size();
+ assert(ret < NODE_BLOCK_SIZE);
+ return ret;
} else {
return sizeof(string_size_t);
}
}
template <typename FieldType, node_type_t NODE_TYPE>
-size_t NODE_T::size_to_nxt_at(size_t index) const {
+node_offset_t NODE_T::size_to_nxt_at(size_t index) const {
assert(index < keys());
if constexpr (FIELD_TYPE == field_type_t::N0 ||
FIELD_TYPE == field_type_t::N1) {
}
template <typename FieldType, node_type_t NODE_TYPE>
-size_t NODE_T::trim_until(
+node_offset_t NODE_T::trim_until(
NodeExtentMutable& mut, const node_extent_t& node, size_t index) {
assert(!node.is_level_tail());
auto keys = node.keys();
}
template <typename FieldType, node_type_t NODE_TYPE>
-size_t NODE_T::trim_at(
- NodeExtentMutable& mut, const node_extent_t& node, size_t index, size_t trimmed) {
+node_offset_t NODE_T::trim_at(
+ NodeExtentMutable& mut, const node_extent_t& node,
+ size_t index, node_offset_t trimmed) {
assert(!node.is_level_tail());
auto keys = node.keys();
assert(index < keys);
if constexpr (std::is_same_v<FieldType, internal_fields_3_t>) {
assert(false && "not implemented");
} else {
- auto offset = node.p_fields->get_item_start_offset(index);
- assert(offset + trimmed < node.p_fields->get_item_end_offset(index));
+ node_offset_t offset = node.p_fields->get_item_start_offset(index);
+ size_t new_offset = offset + trimmed;
+ assert(new_offset < node.p_fields->get_item_end_offset(index));
mut.copy_in_absolute(const_cast<void*>(node.p_fields->p_offset(index)),
- node_offset_t(offset + trimmed));
+ node_offset_t(new_offset));
mut.copy_in_absolute(
(void*)&node.p_fields->num_keys, num_keys_t(index + 1));
}
bool is_level_tail() const { return p_fields->is_level_tail(); }
level_t level() const { return p_fields->header.level; }
+ // Free/total byte accounting for the node; both narrowed to
+ // node_offset_t since neither can exceed the node block size.
- size_t free_size() const {
+ node_offset_t free_size() const {
return p_fields->template free_size_before<NODE_TYPE>(keys());
}
- size_t total_size() const { return p_fields->total_size(); }
+ node_offset_t total_size() const { return p_fields->total_size(); }
const char* p_left_bound() const;
template <node_type_t T = NODE_TYPE>
std::enable_if_t<T == node_type_t::INTERNAL, const laddr_packed_t*>
static constexpr auto CONTAINER_TYPE = ContainerType::INDEXABLE;
size_t keys() const { return p_fields->num_keys; }
key_get_type operator[] (size_t index) const { return p_fields->get_key(index); }
+ // Bytes occupied before the key at `index` (header included); derived
+ // as total minus the free space remaining before that index.
- size_t size_before(size_t index) const {
+ node_offset_t size_before(size_t index) const {
auto free_size = p_fields->template free_size_before<NODE_TYPE>(index);
assert(total_size() >= free_size);
return total_size() - free_size;
}
- size_t size_to_nxt_at(size_t index) const;
+ node_offset_t size_to_nxt_at(size_t index) const;
memory_range_t get_nxt_container(size_t index) const;
template <typename T = FieldType>
static void update_size_at(
NodeExtentMutable&, const node_extent_t&, size_t index, int change);
- static size_t trim_until(NodeExtentMutable&, const node_extent_t&, size_t index);
- static size_t trim_at(NodeExtentMutable&, const node_extent_t&,
- size_t index, size_t trimmed);
+ static node_offset_t trim_until(
+ NodeExtentMutable&, const node_extent_t&, size_t index);
+ static node_offset_t trim_at(NodeExtentMutable&, const node_extent_t&,
+ size_t index, node_offset_t trimmed);
template <KeyT KT>
class Appender;
sizeof(node_header_t) + sizeof(num_keys_t);
bool is_level_tail() const { return header.get_is_level_tail(); }
- size_t total_size() const { return SIZE; }
+ node_offset_t total_size() const { return SIZE; }
key_get_type get_key(size_t index) const {
assert(index < num_keys);
return slots[index].key;
sizeof(node_header_t) + sizeof(num_keys_t);
bool is_level_tail() const { return header.get_is_level_tail(); }
- size_t total_size() const { return SIZE; }
+ node_offset_t total_size() const { return SIZE; }
key_get_type get_key(size_t index) const {
assert(index < num_keys);
node_offset_t item_end_offset =
sizeof(node_header_t) + sizeof(num_keys_t);
bool is_level_tail() const { return header.get_is_level_tail(); }
- size_t total_size() const {
+ node_offset_t total_size() const {
if (is_level_tail()) {
return SIZE - sizeof(snap_gen_t);
} else {
* operator[](size_t) const -> key_get_type
* size_before(size_t) const -> size_t
* (IS_BOTTOM) get_p_value(size_t) const -> const value_t*
- * (!IS_BOTTOM) size_to_nxt_at(size_t) const -> size_t
+ * (!IS_BOTTOM) size_to_nxt_at(size_t) const -> node_offset_t
* (!IS_BOTTOM) get_nxt_container(size_t) const
* static:
* header_size() -> node_offset_t
assert(!is_end());
return container[_index];
}
- size_t size_to_nxt() const {
+ node_offset_t size_to_nxt() const {
assert(!is_end());
return container.size_to_nxt_at(_index);
}
return _index + 1 == container.keys();
}
bool is_end() const { return _index == container.keys(); }
+ // Size of the element at the current index, computed as the delta of
+ // two size_before() prefixes.
- size_t size() const {
+ node_offset_t size() const {
assert(!is_end());
assert(header_size() == container.size_before(0));
+ // New assert: the prefix sizes must be strictly increasing, so the
+ // subtraction below cannot underflow the unsigned result.
+ assert(container.size_before(_index + 1) > container.size_before(_index));
return container.size_before(_index + 1) -
container.size_before(_index);
}
}
}
+ // Trim helpers forward to the container; return type narrowed to
+ // node_offset_t to match the container_t API change in this patch.
- size_t trim_until(NodeExtentMutable& mut) {
+ node_offset_t trim_until(NodeExtentMutable& mut) {
return container_t::trim_until(mut, container, _index);
}
+ // Only meaningful for non-bottom stages, hence the enable_if gate.
- template <typename T = size_t>
+ template <typename T = node_offset_t>
std::enable_if_t<!IS_BOTTOM, T>
- trim_at(NodeExtentMutable& mut, size_t trimmed) {
+ trim_at(NodeExtentMutable& mut, node_offset_t trimmed) {
return container_t::trim_at(mut, container, _index, trimmed);
}
}
template <KeyT KT>
- static size_t estimate_insert(const full_key_t<KT>& key, const value_t& value) {
+ static node_offset_t estimate_insert(
+ const full_key_t<KT>& key, const value_t& value) {
return container_t::template estimate_insert<KT>(key, value);
}
* CONTAINER_TYPE = ContainerType::ITERATIVE
* index() const -> size_t
* get_key() const -> key_get_type
- * size() const -> size_t
- * size_to_nxt() const -> size_t
+ * size() const -> node_offset_t
+ * size_to_nxt() const -> node_offset_t
* get_nxt_container() const
* has_next() const -> bool
* operator++()
assert(!is_end());
return container.get_key();
}
- size_t size_to_nxt() const {
+ node_offset_t size_to_nxt() const {
assert(!is_end());
return container.size_to_nxt();
}
return !container.has_next();
}
bool is_end() const { return _is_end; }
+ // Size of the current element, delegated to the iterative container.
- size_t size() const {
+ node_offset_t size() const {
assert(!is_end());
return container.size();
}
to_index = index();
}
+ // Trim helpers for the iterative container; trimming at the end is a
+ // no-op (returns 0 bytes trimmed).
- size_t trim_until(NodeExtentMutable& mut) {
+ node_offset_t trim_until(NodeExtentMutable& mut) {
if (is_end()) {
return 0;
}
return container_t::trim_until(mut, container);
}
- size_t trim_at(NodeExtentMutable& mut, size_t trimmed) {
+ node_offset_t trim_at(NodeExtentMutable& mut, node_offset_t trimmed) {
assert(!is_end());
return container_t::trim_at(mut, container, trimmed);
}
* get_key() -> key_get_type (const reference or value type)
* is_last() -> bool
* is_end() -> bool
- * size() -> size_t
+ * size() -> node_offset_t
* (IS_BOTTOM) get_p_value() -> const value_t*
* (!IS_BOTTOM) get_nxt_container() -> nxt_stage::container_t
- * (!IS_BOTTOM) size_to_nxt() -> size_t
+ * (!IS_BOTTOM) size_to_nxt() -> node_offset_t
* seek:
* operator++() -> iterator_t&
* seek_at(index)
assert(is_insert_left.has_value());
assert(current_size <= target_size);
if (split_iter.index() == 0) {
- extra_size += iterator_t::header_size();
+ if (insert_index == 0) {
+ if (*is_insert_left == false) {
+ extra_size += iterator_t::header_size();
+ } else {
+ extra_size = 0;
+ }
+ } else {
+ extra_size += iterator_t::header_size();
+ }
} else {
extra_size = 0;
}
* AT: trim happens in the current container, and the according higher
* stage iterator needs to be adjusted by the trimmed size.
*/
- static std::tuple<TrimType, size_t>
+ static std::tuple<TrimType, node_offset_t>
recursively_trim(NodeExtentMutable& mut, StagedIterator& trim_at) {
if (!trim_at.valid()) {
return {TrimType::BEFORE, 0u};
if constexpr (!IS_BOTTOM) {
auto [type, trimmed] = NXT_STAGE_T::recursively_trim(
mut, trim_at.get_nxt());
- size_t trim_size;
+ node_offset_t trim_size;
if (type == TrimType::AFTER) {
if (iter.is_last()) {
return {TrimType::AFTER, 0u};
NodeExtentMutable&, const internal_sub_items_t&, const full_key_t<KeyT::VIEW>&,
const laddr_packed_t&, size_t, node_offset_t, const char*);
+ // Internal sub-items are fixed-size, so the trimmed byte count is just
+ // the count of removed slots times the slot size; range-asserted.
-size_t internal_sub_items_t::trim_until(
+node_offset_t internal_sub_items_t::trim_until(
NodeExtentMutable&, internal_sub_items_t& items, size_t index) {
assert(index != 0);
auto keys = items.keys();
assert(index <= keys);
- return sizeof(internal_sub_item_t) * (keys - index);
+ size_t ret = sizeof(internal_sub_item_t) * (keys - index);
+ assert(ret < NODE_BLOCK_SIZE);
+ return ret;
}
template class internal_sub_items_t::Appender<KeyT::VIEW>;
NodeExtentMutable&, const leaf_sub_items_t&, const full_key_t<KeyT::HOBJ>&,
const onode_t&, size_t, node_offset_t, const char*);
-size_t leaf_sub_items_t::trim_until(
+node_offset_t leaf_sub_items_t::trim_until(
NodeExtentMutable& mut, leaf_sub_items_t& items, size_t index) {
assert(index != 0);
auto keys = items.keys();
mut.shift_absolute(p_shift_start, p_shift_end - p_shift_start,
size_trim_offsets);
mut.copy_in_absolute((void*)items.p_num_keys, num_keys_t(index));
- return size_trim_offsets + (p_shift_start - p_items_start);
+ size_t ret = size_trim_offsets + (p_shift_start - p_items_start);
+ assert(ret < NODE_BLOCK_SIZE);
+ return ret;
}
// helper type for the visitor
assert(index < num_items);
return (p_first_item - index)->get_key();
}
+ // Prefix size of `index` fixed-width internal sub-items; narrowed to
+ // node_offset_t with the range assert added by this patch.
- size_t size_before(size_t index) const {
- return index * sizeof(internal_sub_item_t);
+ node_offset_t size_before(size_t index) const {
+ size_t ret = index * sizeof(internal_sub_item_t);
+ assert(ret < NODE_BLOCK_SIZE);
+ return ret;
}
const laddr_packed_t* get_p_value(size_t index) const {
assert(index < num_items);
const full_key_t<KT>&, const laddr_packed_t&,
size_t index, node_offset_t size, const char* p_left_bound);
- static size_t trim_until(NodeExtentMutable&, internal_sub_items_t&, size_t);
+ static node_offset_t trim_until(NodeExtentMutable&, internal_sub_items_t&, size_t);
template <KeyT KT>
class Appender;
assert(get_item_start(index) < pointer);
return *reinterpret_cast<const snap_gen_t*>(pointer);
}
+ // Prefix size up to slot `index`: the num_keys field, the offset table
+ // entries, and the item bytes recorded by the last offset. Restructured
+ // from early-return to if/else so the single range assert covers both
+ // the index==0 and the general case before returning.
- size_t size_before(size_t index) const {
+ node_offset_t size_before(size_t index) const {
assert(index <= keys());
+ size_t ret;
if (index == 0) {
- return sizeof(num_keys_t);
+ ret = sizeof(num_keys_t);
+ } else {
+ --index;
+ ret = sizeof(num_keys_t) +
+ (index + 1) * sizeof(node_offset_t) +
+ get_offset(index).value;
}
- --index;
- auto ret = sizeof(num_keys_t) +
- (index + 1) * sizeof(node_offset_t) +
- get_offset(index).value;
+ assert(ret < NODE_BLOCK_SIZE);
return ret;
}
const onode_t* get_p_value(size_t index) const {
const full_key_t<KT>&, const onode_t&,
size_t index, node_offset_t size, const char* p_left_bound);
- static size_t trim_until(NodeExtentMutable&, leaf_sub_items_t&, size_t index);
+ static node_offset_t trim_until(NodeExtentMutable&, leaf_sub_items_t&, size_t index);
template <KeyT KT>
class Appender;