#include "stages/node_stage_layout.h"
namespace {
- seastar::logger& logger() {
- return crimson::get_logger(ceph_subsys_filestore);
- }
+
+seastar::logger& logger()
+{
+ return crimson::get_logger(ceph_subsys_filestore);
+}
+
}
namespace crimson::os::seastore::onode {
*/
tree_cursor_t::tree_cursor_t(Ref<LeafNode> node, const search_position_t& pos)
- : ref_leaf_node{node}, position{pos} {
+ : ref_leaf_node{node}, position{pos}
+{
assert(!is_end());
ref_leaf_node->do_track_cursor<true>(*this);
}
tree_cursor_t::tree_cursor_t(
Ref<LeafNode> node, const search_position_t& pos,
const key_view_t& key_view, const value_header_t* p_value_header)
- : ref_leaf_node{node}, position{pos} {
+ : ref_leaf_node{node}, position{pos}
+{
assert(!is_end());
update_cache(*node, key_view, p_value_header);
ref_leaf_node->do_track_cursor<true>(*this);
}
tree_cursor_t::tree_cursor_t(Ref<LeafNode> node)
- : ref_leaf_node{node}, position{search_position_t::end()} {
+ : ref_leaf_node{node}, position{search_position_t::end()}
+{
assert(is_end());
assert(ref_leaf_node->is_level_tail());
}
// Destructor: a cursor at a valid (non-end) position registered itself with
// its leaf node on construction, so it must deregister here; an end() cursor
// was never tracked and needs no cleanup.
-tree_cursor_t::~tree_cursor_t() {
+tree_cursor_t::~tree_cursor_t()
+{
  if (!is_end()) {
    ref_leaf_node->do_untrack_cursor(*this);
  }
}
// Grow the value payload at this cursor's position by extend_size bytes,
// delegating the actual mutation to the owning leaf node under context c.
-node_future<> tree_cursor_t::extend_value(context_t c, value_size_t extend_size) {
+node_future<> tree_cursor_t::extend_value(context_t c, value_size_t extend_size)
+{
  return ref_leaf_node->extend_value(c, position, extend_size);
}
// Shrink the value payload at this cursor's position by trim_size bytes,
// delegating the actual mutation to the owning leaf node under context c.
-node_future<> tree_cursor_t::trim_value(context_t c, value_size_t trim_size) {
+node_future<> tree_cursor_t::trim_value(context_t c, value_size_t trim_size)
+{
  return ref_leaf_node->trim_value(c, position, trim_size);
}
template <bool VALIDATE>
void tree_cursor_t::update_track(
- Ref<LeafNode> node, const search_position_t& pos) {
+ Ref<LeafNode> node, const search_position_t& pos)
+{
// the cursor must be already untracked
// track the new node and new pos
assert(!pos.is_end());
void tree_cursor_t::update_cache(LeafNode& node,
const key_view_t& key_view,
- const value_header_t* p_value_header) const {
+ const value_header_t* p_value_header) const
+{
assert(!is_end());
assert(ref_leaf_node.get() == &node);
cache.update(node, key_view, p_value_header);
cache.validate_is_latest(node, position);
}
-void tree_cursor_t::maybe_update_cache(value_magic_t magic) const {
+void tree_cursor_t::maybe_update_cache(value_magic_t magic) const
+{
assert(!is_end());
if (!cache.is_latest()) {
auto [key_view, p_value_header] = ref_leaf_node->get_kv(position);
tree_cursor_t::Cache::Cache() = default;
// A cached key/value view is trustworthy only if it was captured against the
// leaf node's current layout: any layout mutation bumps the node's layout
// version, which makes this comparison fail and invalidates the cache.
-bool tree_cursor_t::Cache::is_latest() const {
+bool tree_cursor_t::Cache::is_latest() const
+{
  return (valid && (version == p_leaf_node->get_layout_version()));
}
void tree_cursor_t::Cache::update(LeafNode& node,
const key_view_t& _key_view,
- const value_header_t* _p_value_header) {
+ const value_header_t* _p_value_header)
+{
assert(_p_value_header);
p_leaf_node = &node;
version = node.get_layout_version();
}
void tree_cursor_t::Cache::validate_is_latest(const LeafNode& node,
- const search_position_t& pos) const {
+ const search_position_t& pos) const
+{
assert(p_leaf_node == &node);
assert(is_latest());
#ifndef NDEBUG
}
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
-tree_cursor_t::Cache::prepare_mutate_value_payload(context_t c) {
+tree_cursor_t::Cache::prepare_mutate_value_payload(context_t c)
+{
assert(is_latest());
assert(p_leaf_node && p_value_header);
assert(p_value_header->magic == c.vb.get_header_magic());
Node::Node(NodeImplURef&& impl) : impl{std::move(impl)} {}
// Destructor: a root node is tracked by its Super, so it must deregister
// itself.  Non-root child tracking is not undone here; see the XXX below
// about tolerating a failure window between allocate() and as_child().
-Node::~Node() {
+Node::~Node()
+{
  // XXX: tolerate failure between allocate() and as_child()
  if (is_root()) {
    super->do_untrack_root(*this);
  }
}
// Tree level of this node, as recorded by the node-layout implementation.
-level_t Node::level() const {
+level_t Node::level() const
+{
  return impl->level();
}
node_future<Node::search_result_t> Node::lower_bound(
- context_t c, const key_hobj_t& key) {
+ context_t c, const key_hobj_t& key)
+{
return seastar::do_with(
MatchHistory(), [this, c, &key](auto& history) {
return lower_bound_tracked(c, key, history);
}
node_future<std::pair<Ref<tree_cursor_t>, bool>> Node::insert(
- context_t c, const key_hobj_t& key, value_config_t vconf) {
+ context_t c, const key_hobj_t& key, value_config_t vconf)
+{
return seastar::do_with(
MatchHistory(), [this, c, &key, vconf](auto& history) {
return lower_bound_tracked(c, key, history
);
}
-node_future<tree_stats_t> Node::get_tree_stats(context_t c) {
+node_future<tree_stats_t> Node::get_tree_stats(context_t c)
+{
return seastar::do_with(
tree_stats_t(), [this, c](auto& stats) {
return do_get_tree_stats(c, stats).safe_then([&stats] {
);
}
// Full textual dump of this node; formatting is delegated to the layout
// implementation.  Returns the stream for chaining.
-std::ostream& Node::dump(std::ostream& os) const {
+std::ostream& Node::dump(std::ostream& os) const
+{
  return impl->dump(os);
}
// Abbreviated textual dump of this node; formatting is delegated to the
// layout implementation.  Returns the stream for chaining.
-std::ostream& Node::dump_brief(std::ostream& os) const {
+std::ostream& Node::dump_brief(std::ostream& os) const
+{
  return impl->dump_brief(os);
}
void Node::test_make_destructable(
- context_t c, NodeExtentMutable& mut, Super::URef&& _super) {
+ context_t c, NodeExtentMutable& mut, Super::URef&& _super)
+{
impl->test_set_tail(mut);
make_root(c, std::move(_super));
}
// Create an empty tree: allocate the initial root leaf node and register it
// with root_tracker.  NOTE(review): the returned node ref is deliberately
// dropped (see the inline FIXME) — presumably the tracker/super retains the
// root; confirm before relying on the node's lifetime here.
-node_future<> Node::mkfs(context_t c, RootNodeTracker& root_tracker) {
+node_future<> Node::mkfs(context_t c, RootNodeTracker& root_tracker)
+{
  return LeafNode::allocate_root(c, root_tracker
  ).safe_then([](auto ret) { /* FIXME: discard_result(); */ });
}
-node_future<Ref<Node>> Node::load_root(context_t c, RootNodeTracker& root_tracker) {
+node_future<Ref<Node>> Node::load_root(context_t c, RootNodeTracker& root_tracker)
+{
return c.nm.get_super(c.t, root_tracker
).safe_then([c, &root_tracker](auto&& _super) {
auto root_addr = _super->get_root_laddr();
});
}
// Make this node the tree root: persist its address as the root laddr via
// the super block, then take over root ownership of the super.
-void Node::make_root(context_t c, Super::URef&& _super) {
+void Node::make_root(context_t c, Super::URef&& _super)
+{
  _super->write_root_laddr(c, impl->laddr());
  as_root(std::move(_super));
}
-void Node::as_root(Super::URef&& _super) {
+void Node::as_root(Super::URef&& _super)
+{
assert(!super && !_parent_info);
assert(_super->get_root_laddr() == impl->laddr());
assert(impl->is_level_tail());
super->do_track_root(*this);
}
-node_future<> Node::upgrade_root(context_t c) {
+node_future<> Node::upgrade_root(context_t c)
+{
assert(is_root());
assert(impl->is_level_tail());
assert(impl->field_type() == field_type_t::N0);
}
template <bool VALIDATE>
-void Node::as_child(const search_position_t& pos, Ref<InternalNode> parent_node) {
+void Node::as_child(const search_position_t& pos, Ref<InternalNode> parent_node)
+{
assert(!super);
_parent_info = parent_info_t{pos, parent_node};
parent_info().ptr->do_track_child<VALIDATE>(*this);
template void Node::as_child<true>(const search_position_t&, Ref<InternalNode>);
template void Node::as_child<false>(const search_position_t&, Ref<InternalNode>);
-node_future<> Node::insert_parent(context_t c, Ref<Node> right_node) {
+node_future<> Node::insert_parent(context_t c, Ref<Node> right_node)
+{
assert(!is_root());
// TODO(cross-node string dedup)
return parent_info().ptr->apply_child_split(
}
node_future<Ref<Node>> Node::load(
- context_t c, laddr_t addr, bool expect_is_level_tail) {
+ context_t c, laddr_t addr, bool expect_is_level_tail)
+{
// NOTE:
// *option1: all types of node have the same length;
// option2: length is defined by node/field types;
node_future<> InternalNode::apply_child_split(
context_t c, const search_position_t& pos,
- Ref<Node> left_child, Ref<Node> right_child) {
+ Ref<Node> left_child, Ref<Node> right_child)
+{
#ifndef NDEBUG
if (pos.is_end()) {
assert(impl->is_level_tail());
node_future<Ref<InternalNode>> InternalNode::allocate_root(
context_t c, level_t old_root_level,
- laddr_t old_root_addr, Super::URef&& super) {
+ laddr_t old_root_addr, Super::URef&& super)
+{
return InternalNode::allocate(c, field_type_t::N0, true, old_root_level + 1
).safe_then([c, old_root_addr,
super = std::move(super)](auto fresh_node) mutable {
}
node_future<Ref<tree_cursor_t>>
-InternalNode::lookup_smallest(context_t c) {
+InternalNode::lookup_smallest(context_t c)
+{
auto position = search_position_t::begin();
laddr_t child_addr = impl->get_p_value(position)->value;
return get_or_track_child(c, position, child_addr
}
node_future<Ref<tree_cursor_t>>
-InternalNode::lookup_largest(context_t c) {
+InternalNode::lookup_largest(context_t c)
+{
// NOTE: unlike LeafNode::lookup_largest(), this only works for the tail
// internal node to return the tail child address.
auto position = search_position_t::end();
node_future<Node::search_result_t>
InternalNode::lower_bound_tracked(
- context_t c, const key_hobj_t& key, MatchHistory& history) {
+ context_t c, const key_hobj_t& key, MatchHistory& history)
+{
auto result = impl->lower_bound(key, history);
return get_or_track_child(c, result.position, result.p_value->value
).safe_then([c, &key, &history](auto child) {
}
node_future<> InternalNode::do_get_tree_stats(
- context_t c, tree_stats_t& stats) {
+ context_t c, tree_stats_t& stats)
+{
auto nstats = impl->get_stats();
stats.size_persistent_internal += nstats.size_persistent;
stats.size_filled_internal += nstats.size_filled;
}
node_future<> InternalNode::test_clone_root(
- context_t c_other, RootNodeTracker& tracker_other) const {
+ context_t c_other, RootNodeTracker& tracker_other) const
+{
assert(is_root());
assert(impl->is_level_tail());
assert(impl->field_type() == field_type_t::N0);
}
node_future<Ref<Node>> InternalNode::get_or_track_child(
- context_t c, const search_position_t& position, laddr_t child_addr) {
+ context_t c, const search_position_t& position, laddr_t child_addr)
+{
bool level_tail = position.is_end();
Ref<Node> child;
auto found = tracked_child_nodes.find(position);
void InternalNode::track_insert(
const search_position_t& insert_pos, match_stage_t insert_stage,
- Ref<Node> insert_child, Ref<Node> nxt_child) {
+ Ref<Node> insert_child, Ref<Node> nxt_child)
+{
// update tracks
auto pos_upper_bound = insert_pos;
pos_upper_bound.index_by_stage(insert_stage) = INDEX_UPPER_BOUND;
}
void InternalNode::replace_track(
- const search_position_t& position, Ref<Node> new_child, Ref<Node> old_child) {
+ const search_position_t& position, Ref<Node> new_child, Ref<Node> old_child)
+{
assert(tracked_child_nodes[position] == old_child);
tracked_child_nodes.erase(position);
new_child->as_child(position, this);
}
void InternalNode::track_split(
- const search_position_t& split_pos, Ref<InternalNode> right_node) {
+ const search_position_t& split_pos, Ref<InternalNode> right_node)
+{
auto first = tracked_child_nodes.lower_bound(split_pos);
auto iter = first;
while (iter != tracked_child_nodes.end()) {
tracked_child_nodes.erase(first, tracked_child_nodes.end());
}
-void InternalNode::validate_child(const Node& child) const {
+void InternalNode::validate_child(const Node& child) const
+{
#ifndef NDEBUG
assert(impl->level() - 1 == child.impl->level());
assert(this == child.parent_info().ptr);
}
node_future<InternalNode::fresh_node_t> InternalNode::allocate(
- context_t c, field_type_t field_type, bool is_level_tail, level_t level) {
+ context_t c, field_type_t field_type, bool is_level_tail, level_t level)
+{
return InternalNodeImpl::allocate(c, field_type, is_level_tail, level
).safe_then([](auto&& fresh_impl) {
auto node = Ref<InternalNode>(new InternalNode(
LeafNode::LeafNode(LeafNodeImpl* impl, NodeImplURef&& impl_ref)
: Node(std::move(impl_ref)), impl{impl} {}
// Whether this leaf is the right-most node of its level, as recorded by the
// layout implementation.
-bool LeafNode::is_level_tail() const {
+bool LeafNode::is_level_tail() const
+{
  return impl->is_level_tail();
}
// Read the key view and value header at pos from the node layout.  Both
// results point into the node's storage — presumably valid only while the
// underlying extent layout is unchanged; confirm against callers.
std::tuple<key_view_t, const value_header_t*>
-LeafNode::get_kv(const search_position_t& pos) const {
+LeafNode::get_kv(const search_position_t& pos) const
+{
  key_view_t key_view;
  auto p_value_header = impl->get_p_value(pos, &key_view);
  return {key_view, p_value_header};
}
node_future<> LeafNode::extend_value(
- context_t c, const search_position_t& pos, value_size_t extend_size) {
+ context_t c, const search_position_t& pos, value_size_t extend_size)
+{
ceph_abort("not implemented");
return node_ertr::now();
}
node_future<> LeafNode::trim_value(
- context_t c, const search_position_t& pos, value_size_t trim_size) {
+ context_t c, const search_position_t& pos, value_size_t trim_size)
+{
ceph_abort("not implemented");
return node_ertr::now();
}
// Obtain a mutable view of this node's extent plus the value-delta recorder
// (may be null) for mutating a value payload; forwarded to the layout impl.
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
-LeafNode::prepare_mutate_value_payload(context_t c) {
+LeafNode::prepare_mutate_value_payload(context_t c)
+{
  return impl->prepare_mutate_value_payload(c);
}
node_future<Ref<tree_cursor_t>>
-LeafNode::lookup_smallest(context_t) {
+LeafNode::lookup_smallest(context_t)
+{
if (unlikely(impl->is_empty())) {
assert(is_root());
return node_ertr::make_ready_future<Ref<tree_cursor_t>>(
}
node_future<Ref<tree_cursor_t>>
-LeafNode::lookup_largest(context_t) {
+LeafNode::lookup_largest(context_t)
+{
if (unlikely(impl->is_empty())) {
assert(is_root());
return node_ertr::make_ready_future<Ref<tree_cursor_t>>(
node_future<Node::search_result_t>
LeafNode::lower_bound_tracked(
- context_t c, const key_hobj_t& key, MatchHistory& history) {
+ context_t c, const key_hobj_t& key, MatchHistory& history)
+{
key_view_t index_key;
auto result = impl->lower_bound(key, history, &index_key);
Ref<tree_cursor_t> cursor;
search_result_t{cursor, result.mstat});
}
-node_future<> LeafNode::do_get_tree_stats(context_t, tree_stats_t& stats) {
+node_future<> LeafNode::do_get_tree_stats(context_t, tree_stats_t& stats)
+{
auto nstats = impl->get_stats();
stats.size_persistent_leaf += nstats.size_persistent;
stats.size_filled_leaf += nstats.size_filled;
}
node_future<> LeafNode::test_clone_root(
- context_t c_other, RootNodeTracker& tracker_other) const {
+ context_t c_other, RootNodeTracker& tracker_other) const
+{
assert(is_root());
assert(impl->is_level_tail());
assert(impl->field_type() == field_type_t::N0);
node_future<Ref<tree_cursor_t>> LeafNode::insert_value(
context_t c, const key_hobj_t& key, value_config_t vconf,
const search_position_t& pos, const MatchHistory& history,
- match_stat_t mstat) {
+ match_stat_t mstat)
+{
#ifndef NDEBUG
if (pos.is_end()) {
assert(impl->is_level_tail());
}
node_future<Ref<LeafNode>> LeafNode::allocate_root(
- context_t c, RootNodeTracker& root_tracker) {
+ context_t c, RootNodeTracker& root_tracker)
+{
return LeafNode::allocate(c, field_type_t::N0, true
).safe_then([c, &root_tracker](auto fresh_node) {
auto root = fresh_node.node;
Ref<tree_cursor_t> LeafNode::get_or_track_cursor(
const search_position_t& position,
- const key_view_t& key, const value_header_t* p_value_header) {
+ const key_view_t& key, const value_header_t* p_value_header)
+{
assert(!position.is_end());
assert(p_value_header);
Ref<tree_cursor_t> p_cursor;
return p_cursor;
}
-void LeafNode::validate_cursor(tree_cursor_t& cursor) const {
+void LeafNode::validate_cursor(tree_cursor_t& cursor) const
+{
#ifndef NDEBUG
assert(this == cursor.get_leaf_node().get());
assert(!cursor.is_end());
Ref<tree_cursor_t> LeafNode::track_insert(
const search_position_t& insert_pos, match_stage_t insert_stage,
- const value_header_t* p_value_header) {
+ const value_header_t* p_value_header)
+{
// update cursor position
auto pos_upper_bound = insert_pos;
pos_upper_bound.index_by_stage(insert_stage) = INDEX_UPPER_BOUND;
}
void LeafNode::track_split(
- const search_position_t& split_pos, Ref<LeafNode> right_node) {
+ const search_position_t& split_pos, Ref<LeafNode> right_node)
+{
// update cursor ownership and position
auto first = tracked_cursors.lower_bound(split_pos);
auto iter = first;
}
node_future<LeafNode::fresh_node_t> LeafNode::allocate(
- context_t c, field_type_t field_type, bool is_level_tail) {
+ context_t c, field_type_t field_type, bool is_level_tail)
+{
return LeafNodeImpl::allocate(c, field_type, is_level_tail
).safe_then([](auto&& fresh_impl) {
auto node = Ref<LeafNode>(new LeafNode(
namespace crimson::os::seastore::onode {
// Decode the {node_type, field_type} pair from the node_header_t stored at
// the start of this extent's payload.
// NOTE(review): field_type is dereferenced unconditionally — get_field_type()
// appears to return an optional-like value, so an unrecognized on-disk field
// type would fault here; confirm that validation happens upstream.
-std::pair<node_type_t, field_type_t> NodeExtent::get_types() const {
+std::pair<node_type_t, field_type_t> NodeExtent::get_types() const
+{
  const auto header = reinterpret_cast<const node_header_t*>(get_read());
  auto node_type = header->get_node_type();
  auto field_type = header->get_field_type();
  return {node_type, *field_type};
}
-NodeExtentManagerURef NodeExtentManager::create_dummy(bool is_sync) {
+NodeExtentManagerURef NodeExtentManager::create_dummy(bool is_sync)
+{
if (is_sync) {
return NodeExtentManagerURef(new DummyNodeExtentManager<true>());
} else {
}
NodeExtentManagerURef NodeExtentManager::create_seastore(
- TransactionManager& tm, laddr_t min_laddr) {
+ TransactionManager& tm, laddr_t min_laddr)
+{
return NodeExtentManagerURef(new SeastoreNodeExtentManager(tm, min_laddr));
}
namespace {
// File-local logger accessor; the onode tree logs through the
// ceph_subsys_filestore logging subsystem.
-seastar::logger& logger() {
+seastar::logger& logger()
+{
  return crimson::get_logger(ceph_subsys_filestore);
}
namespace crimson::os::seastore::onode {
static DeltaRecorderURef create_replay_recorder(
- node_type_t node_type, field_type_t field_type) {
+ node_type_t node_type, field_type_t field_type)
+{
if (node_type == node_type_t::LEAF) {
if (field_type == field_type_t::N0) {
return DeltaRecorderT<node_fields_0_t, node_type_t::LEAF>::create_for_replay();
}
}
-void SeastoreSuper::write_root_laddr(context_t c, laddr_t addr) {
+void SeastoreSuper::write_root_laddr(context_t c, laddr_t addr)
+{
logger().info("OTree::Seastore: update root {:#x} ...", addr);
root_addr = addr;
auto nm = static_cast<SeastoreNodeExtentManager*>(&c.nm);
}
NodeExtentRef SeastoreNodeExtent::mutate(
- context_t c, DeltaRecorderURef&& _recorder) {
+ context_t c, DeltaRecorderURef&& _recorder)
+{
logger().debug("OTree::Seastore: mutate {:#x} ...", get_laddr());
auto nm = static_cast<SeastoreNodeExtentManager*>(&c.nm);
auto extent = nm->get_tm().get_mutable_extent(c.t, this);
return ret;
}
-void SeastoreNodeExtent::apply_delta(const ceph::bufferlist& bl) {
+void SeastoreNodeExtent::apply_delta(const ceph::bufferlist& bl)
+{
logger().debug("OTree::Seastore: replay {:#x} ...", get_laddr());
if (!recorder) {
auto [node_type, field_type] = get_types();
// XXX: branchless allocation
InternalNodeImpl::alloc_ertr::future<InternalNodeImpl::fresh_impl_t>
InternalNodeImpl::allocate(
- context_t c, field_type_t type, bool is_level_tail, level_t level) {
+ context_t c, field_type_t type, bool is_level_tail, level_t level)
+{
if (type == field_type_t::N0) {
return InternalNode0::allocate(c, is_level_tail, level);
} else if (type == field_type_t::N1) {
LeafNodeImpl::alloc_ertr::future<LeafNodeImpl::fresh_impl_t>
LeafNodeImpl::allocate(
- context_t c, field_type_t type, bool is_level_tail) {
+ context_t c, field_type_t type, bool is_level_tail)
+{
if (type == field_type_t::N0) {
return LeafNode0::allocate(c, is_level_tail, 0);
} else if (type == field_type_t::N1) {
}
InternalNodeImplURef InternalNodeImpl::load(
- NodeExtentRef extent, field_type_t type, bool expect_is_level_tail) {
+ NodeExtentRef extent, field_type_t type, bool expect_is_level_tail)
+{
if (type == field_type_t::N0) {
return InternalNode0::load(extent, expect_is_level_tail);
} else if (type == field_type_t::N1) {
}
LeafNodeImplURef LeafNodeImpl::load(
- NodeExtentRef extent, field_type_t type, bool expect_is_level_tail) {
+ NodeExtentRef extent, field_type_t type, bool expect_is_level_tail)
+{
if (type == field_type_t::N0) {
return LeafNode0::load(extent, expect_is_level_tail);
} else if (type == field_type_t::N1) {
template <KeyT KT>
memory_range_t ITER_T::insert_prefix(
NodeExtentMutable& mut, const ITER_T& iter, const full_key_t<KT>& key,
- bool is_end, node_offset_t size, const char* p_left_bound) {
+ bool is_end, node_offset_t size, const char* p_left_bound)
+{
// 1. insert range
char* p_insert;
if (is_end) {
template <node_type_t NODE_TYPE>
void ITER_T::update_size(
- NodeExtentMutable& mut, const ITER_T& iter, int change) {
+ NodeExtentMutable& mut, const ITER_T& iter, int change)
+{
node_offset_t offset = iter.get_back_offset();
int new_size = change + offset;
assert(new_size > 0 && new_size < NODE_BLOCK_SIZE);
}
template <node_type_t NODE_TYPE>
-node_offset_t ITER_T::trim_until(NodeExtentMutable&, const ITER_T& iter) {
+node_offset_t ITER_T::trim_until(NodeExtentMutable&, const ITER_T& iter)
+{
assert(iter.index() != 0);
size_t ret = iter.p_end() - iter.p_items_start;
assert(ret < NODE_BLOCK_SIZE);
template <node_type_t NODE_TYPE>
node_offset_t ITER_T::trim_at(
- NodeExtentMutable& mut, const ITER_T& iter, node_offset_t trimmed) {
+ NodeExtentMutable& mut, const ITER_T& iter, node_offset_t trimmed)
+{
size_t trim_size = iter.p_start() - iter.p_items_start + trimmed;
assert(trim_size < NODE_BLOCK_SIZE);
assert(iter.get_back_offset() > trimmed);
template <node_type_t NODE_TYPE>
template <KeyT KT>
-bool APPEND_T::append(const ITER_T& src, index_t& items) {
+bool APPEND_T::append(const ITER_T& src, index_t& items)
+{
auto p_end = src.p_end();
bool append_till_end = false;
if (is_valid_index(items)) {
template <node_type_t NODE_TYPE>
template <KeyT KT>
std::tuple<NodeExtentMutable*, char*>
-APPEND_T::open_nxt(const key_get_type& partial_key) {
+APPEND_T::open_nxt(const key_get_type& partial_key)
+{
p_append -= sizeof(node_offset_t);
p_offset_while_open = p_append;
ns_oid_view_t::append(*p_mut, partial_key, p_append);
template <node_type_t NODE_TYPE>
template <KeyT KT>
std::tuple<NodeExtentMutable*, char*>
-APPEND_T::open_nxt(const full_key_t<KT>& key) {
+APPEND_T::open_nxt(const full_key_t<KT>& key)
+{
p_append -= sizeof(node_offset_t);
p_offset_while_open = p_append;
ns_oid_view_t::append<KT>(*p_mut, key, p_append);
template <node_type_t NODE_TYPE>
template <KeyT KT>
-void APPEND_T::wrap_nxt(char* _p_append) {
+void APPEND_T::wrap_nxt(char* _p_append)
+{
assert(_p_append < p_append);
p_mut->copy_in_absolute(
p_offset_while_open, node_offset_t(p_offset_while_open - _p_append));
namespace crimson::os::seastore::onode {
void string_key_view_t::append_str(
- NodeExtentMutable& mut, std::string_view str, char*& p_append) {
+ NodeExtentMutable& mut, std::string_view str, char*& p_append)
+{
assert(is_valid_size(str.length()));
p_append -= sizeof(string_size_t);
string_size_t len = str.length();
}
void string_key_view_t::append_dedup(
- NodeExtentMutable& mut, const Type& dedup_type, char*& p_append) {
+ NodeExtentMutable& mut, const Type& dedup_type, char*& p_append)
+{
p_append -= sizeof(string_size_t);
if (dedup_type == Type::MIN) {
mut.copy_in_absolute(p_append, MIN);
#define NODE_INST(FT, NT) node_extent_t<FT, NT>
template <typename FieldType, node_type_t NODE_TYPE>
-const char* NODE_T::p_left_bound() const {
+const char* NODE_T::p_left_bound() const
+{
if constexpr (std::is_same_v<FieldType, internal_fields_3_t>) {
// N3 internal node doesn't have the right part
return nullptr;
}
template <typename FieldType, node_type_t NODE_TYPE>
-node_offset_t NODE_T::size_to_nxt_at(index_t index) const {
+node_offset_t NODE_T::size_to_nxt_at(index_t index) const
+{
assert(index < keys());
if constexpr (FIELD_TYPE == field_type_t::N0 ||
FIELD_TYPE == field_type_t::N1) {
}
template <typename FieldType, node_type_t NODE_TYPE>
-memory_range_t NODE_T::get_nxt_container(index_t index) const {
+memory_range_t NODE_T::get_nxt_container(index_t index) const
+{
if constexpr (std::is_same_v<FieldType, internal_fields_3_t>) {
ceph_abort("N3 internal node doesn't have the right part");
} else {
void NODE_T::bootstrap_extent(
NodeExtentMutable& mut,
field_type_t field_type, node_type_t node_type,
- bool is_level_tail, level_t level) {
+ bool is_level_tail, level_t level)
+{
node_header_t::bootstrap_extent(
mut, field_type, node_type, is_level_tail, level);
mut.copy_in_relative(
template <typename FieldType, node_type_t NODE_TYPE>
void NODE_T::update_is_level_tail(
- NodeExtentMutable& mut, const node_extent_t& extent, bool value) {
+ NodeExtentMutable& mut, const node_extent_t& extent, bool value)
+{
node_header_t::update_is_level_tail(mut, extent.p_fields->header, value);
}
template <KeyT KT>
memory_range_t NODE_T::insert_prefix_at(
NodeExtentMutable& mut, const node_extent_t& node, const full_key_t<KT>& key,
- index_t index, node_offset_t size, const char* p_left_bound) {
+ index_t index, node_offset_t size, const char* p_left_bound)
+{
if constexpr (FIELD_TYPE == field_type_t::N0 ||
FIELD_TYPE == field_type_t::N1) {
assert(index <= node.keys());
template <typename FieldType, node_type_t NODE_TYPE>
void NODE_T::update_size_at(
- NodeExtentMutable& mut, const node_extent_t& node, index_t index, int change) {
+ NodeExtentMutable& mut, const node_extent_t& node, index_t index, int change)
+{
assert(index < node.keys());
FieldType::update_size_at(mut, node.fields(), index, change);
}
template <typename FieldType, node_type_t NODE_TYPE>
node_offset_t NODE_T::trim_until(
- NodeExtentMutable& mut, const node_extent_t& node, index_t index) {
+ NodeExtentMutable& mut, const node_extent_t& node, index_t index)
+{
assert(!node.is_level_tail());
auto keys = node.keys();
assert(index <= keys);
template <typename FieldType, node_type_t NODE_TYPE>
node_offset_t NODE_T::trim_at(
NodeExtentMutable& mut, const node_extent_t& node,
- index_t index, node_offset_t trimmed) {
+ index_t index, node_offset_t trimmed)
+{
assert(!node.is_level_tail());
assert(index < node.keys());
if constexpr (std::is_same_v<FieldType, internal_fields_3_t>) {
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
-void APPEND_T::append(const node_extent_t& src, index_t from, index_t items) {
+void APPEND_T::append(const node_extent_t& src, index_t from, index_t items)
+{
assert(from <= src.keys());
if (p_src == nullptr) {
p_src = &src;
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
void APPEND_T::append(
- const full_key_t<KT>& key, const value_input_t& value, const value_t*& p_value) {
+ const full_key_t<KT>& key, const value_input_t& value, const value_t*& p_value)
+{
if constexpr (FIELD_TYPE == field_type_t::N3) {
ceph_abort("not implemented");
} else {
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
std::tuple<NodeExtentMutable*, char*>
-APPEND_T::open_nxt(const key_get_type& partial_key) {
+APPEND_T::open_nxt(const key_get_type& partial_key)
+{
if constexpr (FIELD_TYPE == field_type_t::N0 ||
FIELD_TYPE == field_type_t::N1) {
FieldType::append_key(*p_mut, partial_key, p_append_left);
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
std::tuple<NodeExtentMutable*, char*>
-APPEND_T::open_nxt(const full_key_t<KT>& key) {
+APPEND_T::open_nxt(const full_key_t<KT>& key)
+{
if constexpr (FIELD_TYPE == field_type_t::N0 ||
FIELD_TYPE == field_type_t::N1) {
FieldType::template append_key<KT>(*p_mut, key, p_append_left);
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
-char* APPEND_T::wrap() {
+char* APPEND_T::wrap()
+{
assert(p_append_left <= p_append_right);
assert(p_src);
if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
void node_header_t::bootstrap_extent(
NodeExtentMutable& mut,
field_type_t field_type, node_type_t node_type,
- bool is_level_tail, level_t level) {
+ bool is_level_tail, level_t level)
+{
node_header_t header;
header.set_field_type(field_type);
header.set_node_type(node_type);
}
void node_header_t::update_is_level_tail(
- NodeExtentMutable& mut, const node_header_t& header, bool value) {
+ NodeExtentMutable& mut, const node_header_t& header, bool value)
+{
auto& _header = const_cast<node_header_t&>(header);
_header.set_is_level_tail(value);
mut.validate_inplace_update(_header);
template <typename SlotType>
void F013_T::update_size_at(
- NodeExtentMutable& mut, const me_t& node, index_t index, int change) {
+ NodeExtentMutable& mut, const me_t& node, index_t index, int change)
+{
assert(index <= node.num_keys);
for (const auto* p_slot = &node.slots[index];
p_slot < &node.slots[node.num_keys];
template <typename SlotType>
void F013_T::append_key(
- NodeExtentMutable& mut, const key_t& key, char*& p_append) {
+ NodeExtentMutable& mut, const key_t& key, char*& p_append)
+{
mut.copy_in_absolute(p_append, key);
p_append += sizeof(key_t);
}
template <typename SlotType>
void F013_T::append_offset(
- NodeExtentMutable& mut, node_offset_t offset_to_right, char*& p_append) {
+ NodeExtentMutable& mut, node_offset_t offset_to_right, char*& p_append)
+{
mut.copy_in_absolute(p_append, offset_to_right);
p_append += sizeof(node_offset_t);
}
template <KeyT KT>
void F013_T::insert_at(
NodeExtentMutable& mut, const full_key_t<KT>& key,
- const me_t& node, index_t index, node_offset_t size_right) {
+ const me_t& node, index_t index, node_offset_t size_right)
+{
assert(index <= node.num_keys);
update_size_at(mut, node, index, size_right);
auto p_insert = const_cast<char*>(fields_start(node)) +
F013_TEMPLATE(slot_3_t);
void node_fields_2_t::append_offset(
- NodeExtentMutable& mut, node_offset_t offset_to_right, char*& p_append) {
+ NodeExtentMutable& mut, node_offset_t offset_to_right, char*& p_append)
+{
mut.copy_in_absolute(p_append, offset_to_right);
p_append += sizeof(node_offset_t);
}
const laddr_packed_t* internal_sub_items_t::insert_at(
NodeExtentMutable& mut, const internal_sub_items_t& sub_items,
const full_key_t<KT>& key, const laddr_t& value,
- index_t index, node_offset_t size, const char* p_left_bound) {
+ index_t index, node_offset_t size, const char* p_left_bound)
+{
assert(index <= sub_items.keys());
assert(size == estimate_insert<KT>(key, value));
const char* p_shift_start = p_left_bound;
IA_TEMPLATE(KeyT::HOBJ);
node_offset_t internal_sub_items_t::trim_until(
- NodeExtentMutable&, internal_sub_items_t& items, index_t index) {
+ NodeExtentMutable&, internal_sub_items_t& items, index_t index)
+{
assert(index != 0);
auto keys = items.keys();
assert(index <= keys);
template <KeyT KT>
void internal_sub_items_t::Appender<KT>::append(
- const internal_sub_items_t& src, index_t from, index_t items) {
+ const internal_sub_items_t& src, index_t from, index_t items)
+{
assert(from <= src.keys());
if (items == 0) {
return;
template <KeyT KT>
void internal_sub_items_t::Appender<KT>::append(
const full_key_t<KT>& key, const laddr_t& value,
- const laddr_packed_t*& p_value) {
+ const laddr_packed_t*& p_value)
+{
p_append -= sizeof(internal_sub_item_t);
auto item = internal_sub_item_t{
snap_gen_t::from_key<KT>(key), laddr_packed_t{value}};
const value_header_t* leaf_sub_items_t::insert_at(
NodeExtentMutable& mut, const leaf_sub_items_t& sub_items,
const full_key_t<KT>& key, const value_config_t& value,
- index_t index, node_offset_t size, const char* p_left_bound) {
+ index_t index, node_offset_t size, const char* p_left_bound)
+{
assert(index <= sub_items.keys());
assert(size == estimate_insert<KT>(key, value));
// a. [... item(index)] << size
const value_config_t&, index_t, node_offset_t, const char*);
node_offset_t leaf_sub_items_t::trim_until(
- NodeExtentMutable& mut, leaf_sub_items_t& items, index_t index) {
+ NodeExtentMutable& mut, leaf_sub_items_t& items, index_t index)
+{
assert(index != 0);
auto keys = items.keys();
assert(index <= keys);
template<class... Ts> overloaded(Ts...) -> overloaded<Ts...>;
template <KeyT KT>
-char* leaf_sub_items_t::Appender<KT>::wrap() {
+char* leaf_sub_items_t::Appender<KT>::wrap()
+{
auto p_cur = p_append;
num_keys_t num_keys = 0;
for (auto i = 0u; i < cnt; ++i) {
namespace crimson::os::seastore::onode {
-Ref<Node> RootNodeTrackerIsolated::get_root(Transaction& t) const {
+Ref<Node> RootNodeTrackerIsolated::get_root(Transaction& t) const
+{
auto iter = tracked_supers.find(&t);
if (iter == tracked_supers.end()) {
return nullptr;
}
}
-Ref<Node> RootNodeTrackerShared::get_root(Transaction&) const {
+Ref<Node> RootNodeTrackerShared::get_root(Transaction&) const
+{
if (is_clean()) {
return nullptr;
} else {
using future = Value::future<ValueT>;
ceph::bufferlist&
-ValueDeltaRecorder::get_encoded(NodeExtentMutable& payload_mut) {
+ValueDeltaRecorder::get_encoded(NodeExtentMutable& payload_mut)
+{
ceph::encode(node_delta_op_t::SUBOP_UPDATE_VALUE, encoded);
node_offset_t offset = payload_mut.get_node_offset();
assert(offset > sizeof(value_header_t));
Value::~Value() {}
-future<> Value::extend(Transaction& t, value_size_t extend_size) {
+future<> Value::extend(Transaction& t, value_size_t extend_size)
+{
auto target_size = get_payload_size() + extend_size;
return p_cursor->extend_value(get_context(t), extend_size
).safe_then([this, target_size] {
});
}
-future<> Value::trim(Transaction& t, value_size_t trim_size) {
+future<> Value::trim(Transaction& t, value_size_t trim_size)
+{
assert(get_payload_size() > trim_size);
auto target_size = get_payload_size() - trim_size;
return p_cursor->trim_value(get_context(t), trim_size
});
}
// Read this value's on-disk header via the cursor, passing the value
// builder's magic so the cursor can check it against the stored header.
-const value_header_t* Value::read_value_header() const {
+const value_header_t* Value::read_value_header() const
+{
  return p_cursor->read_value_header(vb.get_header_magic());
}
// Prepare an in-place mutation of this value's payload: returns the mutable
// extent view and the delta recorder (may be null) from the cursor, bound to
// transaction t's context.
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
-Value::do_prepare_mutate_payload(Transaction& t) {
+Value::do_prepare_mutate_payload(Transaction& t)
+{
  return p_cursor->prepare_mutate_value_payload(get_context(t));
}
std::unique_ptr<ValueDeltaRecorder>
build_value_recorder_by_type(ceph::bufferlist& encoded,
- const value_magic_t& magic) {
+ const value_magic_t& magic)
+{
std::unique_ptr<ValueDeltaRecorder> ret;
switch (magic) {
case value_magic_t::TEST: