// update pos => left_child to pos => right_child
auto left_child_addr = left_child->impl->laddr();
+ auto left_child_addr_packed = laddr_packed_t{left_child_addr};
auto right_child_addr = right_child->impl->laddr();
impl->replace_child_addr(pos, right_child_addr, left_child_addr);
replace_track(pos, right_child, left_child);
auto free_size = impl->free_size();
if (free_size >= insert_size) {
// insert
- auto p_value = impl->insert(left_key, left_child_addr,
+ auto p_value = impl->insert(left_key, left_child_addr_packed,
insert_pos, insert_stage, insert_size);
assert(impl->free_size() == free_size - insert_size);
assert(insert_pos <= pos);
- assert(*p_value == left_child_addr);
+ assert(p_value->value == left_child_addr);
track_insert(insert_pos, insert_stage, left_child, right_child);
validate_tracked_children();
return node_ertr::now();
insert_pos, insert_stage, insert_size](auto fresh_right) mutable {
auto right_node = fresh_right.node;
auto left_child_addr = left_child->impl->laddr();
+ auto left_child_addr_packed = laddr_packed_t{left_child_addr};
auto [split_pos, is_insert_left, p_value] = impl->split_insert(
- fresh_right.mut, *right_node->impl, left_key, left_child_addr,
+ fresh_right.mut, *right_node->impl, left_key, left_child_addr_packed,
insert_pos, insert_stage, insert_size);
- assert(*p_value == left_child_addr);
+ assert(p_value->value == left_child_addr);
track_split(split_pos, right_node);
if (is_insert_left) {
track_insert(insert_pos, insert_stage, left_child);
).safe_then([c, old_root_addr,
super = std::move(super)](auto fresh_node) mutable {
auto root = fresh_node.node;
- const laddr_t* p_value = root->impl->get_p_value(search_position_t::end());
+ auto p_value = root->impl->get_p_value(search_position_t::end());
fresh_node.mut.copy_in_absolute(
- const_cast<laddr_t*>(p_value), old_root_addr);
+ const_cast<laddr_packed_t*>(p_value), old_root_addr);
root->make_root_from(c, std::move(super), old_root_addr);
return root;
});
node_future<Ref<tree_cursor_t>>
InternalNode::lookup_smallest(context_t c) {
auto position = search_position_t::begin();
- laddr_t child_addr = *impl->get_p_value(position);
+ laddr_t child_addr = impl->get_p_value(position)->value;
return get_or_track_child(c, position, child_addr
).safe_then([c](auto child) {
return child->lookup_smallest(c);
// NOTE: unlike LeafNode::lookup_largest(), this only works for the tail
// internal node to return the tail child address.
auto position = search_position_t::end();
- laddr_t child_addr = *impl->get_p_value(position);
+ laddr_t child_addr = impl->get_p_value(position)->value;
return get_or_track_child(c, position, child_addr).safe_then([c](auto child) {
return child->lookup_largest(c);
});
InternalNode::lower_bound_tracked(
context_t c, const key_hobj_t& key, MatchHistory& history) {
auto result = impl->lower_bound(key, history);
- return get_or_track_child(c, result.position, *result.p_value
+ return get_or_track_child(c, result.position, result.p_value->value
).safe_then([c, &key, &history](auto child) {
// XXX(multi-type): pass result.mstat to child
return child->lower_bound_tracked(c, key, history);
assert(impl->level() - 1 == child.impl->level());
assert(this == child.parent_info().ptr);
auto& child_pos = child.parent_info().position;
- assert(*impl->get_p_value(child_pos) == child.impl->laddr());
+ assert(impl->get_p_value(child_pos)->value == child.impl->laddr());
if (child_pos.is_end()) {
assert(impl->is_level_tail());
assert(child.impl->is_level_tail());
}
void update_child_addr_replayable(
- const laddr_t new_addr, laddr_t* p_addr) {
+ const laddr_t new_addr, laddr_packed_t* p_addr) {
assert(state != state_t::PENDING_MUTATE);
// TODO: encode params to recorder as delta
return layout_t::update_child_addr(*mut, new_addr, p_addr);
virtual ~InternalNodeImpl() = default;
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
- virtual const laddr_t* get_p_value(
+ virtual const laddr_packed_t* get_p_value(
const search_position_t&,
key_view_t* = nullptr, internal_marker_t = {}) const {
assert(false && "impossible path");
assert(false && "impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
- virtual const laddr_t* insert(
- const key_view_t&, const laddr_t&, search_position_t&, match_stage_t&, node_offset_t&) {
+ virtual const laddr_packed_t* insert(
+ const key_view_t&, const laddr_packed_t&, search_position_t&, match_stage_t&, node_offset_t&) {
assert(false && "impossible path");
}
#pragma GCC diagnostic ignored "-Woverloaded-virtual"
- virtual std::tuple<search_position_t, bool, const laddr_t*> split_insert(
- NodeExtentMutable&, NodeImpl&, const key_view_t&, const laddr_t&,
+ virtual std::tuple<search_position_t, bool, const laddr_packed_t*> split_insert(
+ NodeExtentMutable&, NodeImpl&, const key_view_t&, const laddr_packed_t&,
search_position_t&, match_stage_t&, node_offset_t&) {
assert(false && "impossible path");
}
auto value_ptr = node_stage.get_end_p_laddr();
int offset = reinterpret_cast<const char*>(value_ptr) - p_start;
os << "\n tail value: 0x"
- << std::hex << *value_ptr << std::dec
+ << std::hex << value_ptr->value << std::dec
<< " " << size << "B"
<< " @" << offset << "B";
}
void replace_child_addr(
const search_position_t& pos, laddr_t dst, laddr_t src) override {
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
- const laddr_t* p_value = get_p_value(pos);
- assert(*p_value == src);
- extent.update_child_addr_replayable(dst, const_cast<laddr_t*>(p_value));
+ const laddr_packed_t* p_value = get_p_value(pos);
+ assert(p_value->value == src);
+ extent.update_child_addr_replayable(dst, const_cast<laddr_packed_t*>(p_value));
} else {
assert(false && "impossible path");
}
const key_view_t& key, const laddr_t& value,
search_position_t& insert_pos) const override {
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
+ auto packed_value = laddr_packed_t{value};
auto& node_stage = extent.read();
match_stage_t insert_stage;
node_offset_t insert_size;
if (unlikely(!node_stage.keys())) {
assert(insert_pos.is_end());
insert_stage = STAGE;
- insert_size = STAGE_T::template insert_size<KeyT::VIEW>(key, value);
+ insert_size = STAGE_T::template insert_size<KeyT::VIEW>(key, packed_value);
} else {
std::tie(insert_stage, insert_size) = STAGE_T::evaluate_insert(
- node_stage, key, value, cast_down<STAGE>(insert_pos), true);
+ node_stage, key, packed_value, cast_down<STAGE>(insert_pos), true);
}
return {insert_stage, insert_size};
} else {
}
+ // Overwrite a child address in-place through the mutable extent; p_addr
+ // points into the packed node layout, hence laddr_packed_t*, while the new
+ // address itself is passed as a plain (aligned) laddr_t.
static void update_child_addr(
- NodeExtentMutable& mut, const laddr_t new_addr, laddr_t* p_addr) {
+ NodeExtentMutable& mut, const laddr_t new_addr, laddr_packed_t* p_addr) {
assert(NODE_TYPE == node_type_t::INTERNAL);
mut.copy_in_absolute(p_addr, new_addr);
}
return os;
}
+// Wraps laddr_t for values embedded in packed on-disk node layouts: the
+// wrapper struct is itself packed, so pointers to it can be formed without
+// tripping -Waddress-of-packed-member; callers read/write through .value.
+struct laddr_packed_t {
+ laddr_t value;
+} __attribute__((packed));
+
}
void next_item_range(const char* p_end) const {
auto p_item_end = p_end - sizeof(node_offset_t);
assert(p_items_start < p_item_end);
- back_offset = *reinterpret_cast<const node_offset_t*>(p_item_end);
+ back_offset = reinterpret_cast<const node_offset_packed_t*>(p_item_end)->value;
assert(back_offset);
const char* p_item_start = p_item_end - back_offset;
assert(p_items_start <= p_item_start);
template <KeyT type>
using full_key_t = typename _full_key_type<type>::type;
+// Wraps node_offset_t stored unaligned in a node's offset table; access goes
+// through .value so the address of a raw packed member is never taken.
+struct node_offset_packed_t {
+ node_offset_t value;
+} __attribute__((packed));
+
// TODO: consider alignments
struct shard_pool_t {
bool operator==(const shard_pool_t& x) const {
assert(p_src);
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
if (p_src->is_level_tail()) {
- laddr_t tail_value = *p_src->get_end_p_laddr();
+ laddr_t tail_value = p_src->get_end_p_laddr()->value;
p_append_right -= sizeof(laddr_t);
assert(p_append_left <= p_append_right);
p_mut->copy_in_absolute(p_append_right, tail_value);
size_t total_size() const { return p_fields->total_size(); }
const char* p_left_bound() const;
template <node_type_t T = NODE_TYPE>
- std::enable_if_t<T == node_type_t::INTERNAL, const laddr_t*>
+ std::enable_if_t<T == node_type_t::INTERNAL, const laddr_packed_t*>
get_end_p_laddr() const {
assert(is_level_tail());
if constexpr (FIELD_TYPE == field_type_t::N3) {
- #pragma GCC diagnostic ignored "-Waddress-of-packed-member"
return &p_fields->child_addrs[keys()];
} else {
auto offset_start = p_fields->get_item_end_offset(keys());
assert(offset_start <= FieldType::SIZE);
- offset_start -= sizeof(laddr_t);
+ offset_start -= sizeof(laddr_packed_t);
auto p_addr = p_start() + offset_start;
- return reinterpret_cast<const laddr_t*>(p_addr);
+ return reinterpret_cast<const laddr_packed_t*>(p_addr);
}
}
get_p_value(size_t index) const {
assert(index < keys());
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
- #pragma GCC diagnostic ignored "-Waddress-of-packed-member"
return &p_fields->child_addrs[index];
} else {
auto range = get_nxt_container(index);
node_header_t header;
num_keys_t num_keys = 0u;
snap_gen_t keys[MAX_NUM_KEYS];
- laddr_t child_addrs[MAX_NUM_KEYS];
+ laddr_packed_t child_addrs[MAX_NUM_KEYS];
} __attribute__((packed));
static_assert(_internal_fields_3_t<MAX_NUM_KEYS_I3>::SIZE <= NODE_BLOCK_SIZE &&
_internal_fields_3_t<MAX_NUM_KEYS_I3 + 1>::SIZE > NODE_BLOCK_SIZE);
if constexpr (NODE_TYPE == node_type_t::LEAF) {
os << *value_ptr;
} else {
- os << "0x" << std::hex << *value_ptr << std::dec;
+ os << "0x" << std::hex << value_ptr->value << std::dec;
}
os << " " << size << "B"
<< " @" << offset << "B";
enum class ContainerType { ITERATIVE, INDEXABLE };
template <node_type_t> struct value_type;
-template<> struct value_type<node_type_t::INTERNAL> { using type = laddr_t; };
+template<> struct value_type<node_type_t::INTERNAL> { using type = laddr_packed_t; };
template<> struct value_type<node_type_t::LEAF> { using type = onode_t; };
template <node_type_t NODE_TYPE>
using value_type_t = typename value_type<NODE_TYPE>::type;
namespace crimson::os::seastore::onode {
template <KeyT KT>
-const laddr_t* internal_sub_items_t::insert_at(
+const laddr_packed_t* internal_sub_items_t::insert_at(
NodeExtentMutable& mut, const internal_sub_items_t& sub_items,
- const full_key_t<KT>& key, const laddr_t& value,
+ const full_key_t<KT>& key, const laddr_packed_t& value,
size_t index, node_offset_t size, const char* p_left_bound) {
assert(index <= sub_items.keys());
assert(size == estimate_insert<KT>(key, value));
mut.copy_in_absolute(p_insert, item);
return &reinterpret_cast<internal_sub_item_t*>(p_insert)->value;
}
-template const laddr_t* internal_sub_items_t::insert_at<KeyT::VIEW>(
+template const laddr_packed_t* internal_sub_items_t::insert_at<KeyT::VIEW>(
NodeExtentMutable&, const internal_sub_items_t&, const full_key_t<KeyT::VIEW>&,
- const laddr_t&, size_t, node_offset_t, const char*);
+ const laddr_packed_t&, size_t, node_offset_t, const char*);
size_t internal_sub_items_t::trim_until(
NodeExtentMutable&, internal_sub_items_t& items, size_t index) {
template <KeyT KT>
void internal_sub_items_t::Appender<KT>::append(
- const full_key_t<KT>& key, const laddr_t& value, const laddr_t*& p_value) {
- assert(pp_value == nullptr);
+ const full_key_t<KT>& key, const laddr_packed_t& value,
+ const laddr_packed_t*& p_value) {
p_append -= sizeof(internal_sub_item_t);
auto item = internal_sub_item_t{snap_gen_t::from_key<KT>(key), value};
p_mut->copy_in_absolute(p_append, item);
// c. compensate affected offsets
auto item_size = value.size + sizeof(snap_gen_t);
for (auto i = index; i < sub_items.keys(); ++i) {
- const node_offset_t& offset_i = sub_items.get_offset(i);
- mut.copy_in_absolute((void*)&offset_i, node_offset_t(offset_i + item_size));
+ const node_offset_packed_t& offset_i = sub_items.get_offset(i);
+ mut.copy_in_absolute((void*)&offset_i, node_offset_t(offset_i.value + item_size));
}
// d. [item(index-1) ... item(0) ... offset(index)] <<< sizeof(node_offset_t)
int compensate = (last_offset - op_src->get_offset_to_end(arg.from));
node_offset_t offset;
for (auto i = arg.from; i < arg.from + arg.items; ++i) {
- offset = op_src->get_offset(i) + compensate;
+ offset = op_src->get_offset(i).value + compensate;
p_cur -= sizeof(node_offset_t);
p_mut->copy_in_absolute(p_cur, offset);
}
struct internal_sub_item_t {
const snap_gen_t& get_key() const { return key; }
- #pragma GCC diagnostic ignored "-Waddress-of-packed-member"
- const laddr_t* get_p_value() const { return &value; }
+ // value is itself a packed wrapper type, so &value is well-formed without
+ // the -Waddress-of-packed-member suppression the raw laddr_t needed.
+ const laddr_packed_t* get_p_value() const { return &value; }
snap_gen_t key;
- laddr_t value;
+ laddr_packed_t value;
} __attribute__((packed));
/*
size_t size_before(size_t index) const {
return index * sizeof(internal_sub_item_t);
}
- const laddr_t* get_p_value(size_t index) const {
+ const laddr_packed_t* get_p_value(size_t index) const {
assert(index < num_items);
return (p_first_item - index)->get_p_value();
}
static node_offset_t header_size() { return 0u; }
+ // Internal sub-item entries are fixed-size (snap_gen_t + packed laddr), so
+ // an insertion always costs exactly one internal_sub_item_t, independent of
+ // the key or value passed in.
template <KeyT KT>
- static node_offset_t estimate_insert(const full_key_t<KT>&, const laddr_t&) {
+ static node_offset_t estimate_insert(
+ const full_key_t<KT>&, const laddr_packed_t&) {
return sizeof(internal_sub_item_t);
}
template <KeyT KT>
- static const laddr_t* insert_at(
+ static const laddr_packed_t* insert_at(
NodeExtentMutable&, const internal_sub_items_t&,
- const full_key_t<KT>&, const laddr_t&,
+ const full_key_t<KT>&, const laddr_packed_t&,
size_t index, node_offset_t size, const char* p_left_bound);
static size_t trim_until(NodeExtentMutable&, internal_sub_items_t&, size_t);
Appender(NodeExtentMutable* p_mut, char* p_append)
: p_mut{p_mut}, p_append{p_append} {}
void append(const internal_sub_items_t& src, size_t from, size_t items);
- void append(const full_key_t<KT>&, const laddr_t&, const laddr_t*&);
+ void append(const full_key_t<KT>&, const laddr_packed_t&, const laddr_packed_t*&);
char* wrap() { return p_append; }
private:
- const laddr_t** pp_value = nullptr;
NodeExtentMutable* p_mut;
char* p_append;
};
assert(keys());
auto _p_offsets = _p_num_keys - sizeof(node_offset_t);
assert(range.p_start < _p_offsets);
- p_offsets = reinterpret_cast<const node_offset_t*>(_p_offsets);
+ p_offsets = reinterpret_cast<const node_offset_packed_t*>(_p_offsets);
p_items_end = reinterpret_cast<const char*>(&get_offset(keys() - 1));
assert(range.p_start < p_items_end);
assert(range.p_start == p_start());
const char* p_start() const { return get_item_end(keys()); }
- const node_offset_t& get_offset(size_t index) const {
+ const node_offset_packed_t& get_offset(size_t index) const {
assert(index < keys());
return *(p_offsets - index);
}
const node_offset_t get_offset_to_end(size_t index) const {
assert(index <= keys());
- return index == 0 ? 0 : get_offset(index - 1);
+ return index == 0 ? 0 : get_offset(index - 1).value;
}
const char* get_item_start(size_t index) const {
- return p_items_end - get_offset(index);
+ return p_items_end - get_offset(index).value;
}
const char* get_item_end(size_t index) const {
--index;
auto ret = sizeof(num_keys_t) +
(index + 1) * sizeof(node_offset_t) +
- get_offset(index);
+ get_offset(index).value;
return ret;
}
const onode_t* get_p_value(size_t index) const {
private:
// TODO: support unaligned access
const num_keys_t* p_num_keys;
- const node_offset_t* p_offsets;
+ const node_offset_packed_t* p_offsets;
const char* p_items_end;
};
onode_t value = {2};
#define _STAGE_T(NodeType) node_to_stage_t<typename NodeType::node_stage_t>
#define NXT_T(StageType) staged<typename StageType::next_param_t>
+ laddr_packed_t i_value{0};
logger().info("\n"
"Bytes of a key-value insertion (full-string):\n"
" s-p-c, 'n'-'o', s-g => onode_t(2): typically internal 41B, leaf 35B\n"
" LeafNode1: {} {} {}\n"
" LeafNode2: {} {}\n"
" LeafNode3: {}",
- _STAGE_T(InternalNode0)::template insert_size<KeyT::VIEW>(key_view, 0),
- NXT_T(_STAGE_T(InternalNode0))::template insert_size<KeyT::VIEW>(key_view, 0),
- NXT_T(NXT_T(_STAGE_T(InternalNode0)))::template insert_size<KeyT::VIEW>(key_view, 0),
- _STAGE_T(InternalNode1)::template insert_size<KeyT::VIEW>(key_view, 0),
- NXT_T(_STAGE_T(InternalNode1))::template insert_size<KeyT::VIEW>(key_view, 0),
- NXT_T(NXT_T(_STAGE_T(InternalNode1)))::template insert_size<KeyT::VIEW>(key_view, 0),
- _STAGE_T(InternalNode2)::template insert_size<KeyT::VIEW>(key_view, 0),
- NXT_T(_STAGE_T(InternalNode2))::template insert_size<KeyT::VIEW>(key_view, 0),
- _STAGE_T(InternalNode3)::template insert_size<KeyT::VIEW>(key_view, 0),
+ _STAGE_T(InternalNode0)::template insert_size<KeyT::VIEW>(key_view, i_value),
+ NXT_T(_STAGE_T(InternalNode0))::template insert_size<KeyT::VIEW>(key_view, i_value),
+ NXT_T(NXT_T(_STAGE_T(InternalNode0)))::template insert_size<KeyT::VIEW>(key_view, i_value),
+ _STAGE_T(InternalNode1)::template insert_size<KeyT::VIEW>(key_view, i_value),
+ NXT_T(_STAGE_T(InternalNode1))::template insert_size<KeyT::VIEW>(key_view, i_value),
+ NXT_T(NXT_T(_STAGE_T(InternalNode1)))::template insert_size<KeyT::VIEW>(key_view, i_value),
+ _STAGE_T(InternalNode2)::template insert_size<KeyT::VIEW>(key_view, i_value),
+ NXT_T(_STAGE_T(InternalNode2))::template insert_size<KeyT::VIEW>(key_view, i_value),
+ _STAGE_T(InternalNode3)::template insert_size<KeyT::VIEW>(key_view, i_value),
_STAGE_T(LeafNode0)::template insert_size<KeyT::HOBJ>(key, value),
NXT_T(_STAGE_T(LeafNode0))::template insert_size<KeyT::HOBJ>(key, value),
NXT_T(NXT_T(_STAGE_T(LeafNode0)))::template insert_size<KeyT::HOBJ>(key, value),