#include "crimson/common/errorator.h"
#include "crimson/os/seastore/onode.h"
#include "crimson/os/seastore/seastore_types.h"
+#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/osd/exceptions.h"
namespace crimson::os::seastore {
using base_ertr = crimson::errorator<
crimson::ct_error::eagain>;
+ using base_iertr = trans_iertr<base_ertr>;
public:
using mkfs_ertr = base_ertr;
using mkfs_ret = mkfs_ertr::future<>;
virtual mkfs_ret mkfs(Transaction &t) = 0;
- using contains_onode_ertr = base_ertr;
- using contains_onode_ret = contains_onode_ertr::future<bool>;
+ using contains_onode_iertr = base_iertr;
+ using contains_onode_ret = contains_onode_iertr::future<bool>;
virtual contains_onode_ret contains_onode(
Transaction &trans,
const ghobject_t &hoid) = 0;
- using get_onode_ertr = base_ertr::extend<
+ using get_onode_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
- using get_onode_ret = get_onode_ertr::future<
+ using get_onode_ret = get_onode_iertr::future<
OnodeRef>;
virtual get_onode_ret get_onode(
Transaction &trans,
const ghobject_t &hoid) = 0;
- using get_or_create_onode_ertr = base_ertr::extend<
+ using get_or_create_onode_iertr = base_iertr::extend<
crimson::ct_error::value_too_large>;
- using get_or_create_onode_ret = get_or_create_onode_ertr::future<
+ using get_or_create_onode_ret = get_or_create_onode_iertr::future<
OnodeRef>;
virtual get_or_create_onode_ret get_or_create_onode(
Transaction &trans,
const ghobject_t &hoid) = 0;
- using get_or_create_onodes_ertr = base_ertr::extend<
+ using get_or_create_onodes_iertr = base_iertr::extend<
crimson::ct_error::value_too_large>;
- using get_or_create_onodes_ret = get_or_create_onodes_ertr::future<
+ using get_or_create_onodes_ret = get_or_create_onodes_iertr::future<
std::vector<OnodeRef>>;
virtual get_or_create_onodes_ret get_or_create_onodes(
Transaction &trans,
const std::vector<ghobject_t> &hoids) = 0;
- using write_dirty_ertr = base_ertr;
- using write_dirty_ret = write_dirty_ertr::future<>;
+ using write_dirty_iertr = base_iertr;
+ using write_dirty_ret = write_dirty_iertr::future<>;
virtual write_dirty_ret write_dirty(
Transaction &trans,
const std::vector<OnodeRef> &onodes) = 0;
- using erase_onode_ertr = base_ertr;
- using erase_onode_ret = erase_onode_ertr::future<>;
+ using erase_onode_iertr = base_iertr;
+ using erase_onode_ret = erase_onode_iertr::future<>;
virtual erase_onode_ret erase_onode(
Transaction &trans,
OnodeRef &onode) = 0;
- using list_onodes_ertr = base_ertr;
+ using list_onodes_iertr = base_iertr;
using list_onodes_bare_ret = std::tuple<std::vector<ghobject_t>, ghobject_t>;
- using list_onodes_ret = list_onodes_ertr::future<list_onodes_bare_ret>;
+ using list_onodes_ret = list_onodes_iertr::future<list_onodes_bare_ret>;
virtual list_onodes_ret list_onodes(
Transaction &trans,
const ghobject_t& start,
LOG_PREFIX(FLTreeOnodeManager::get_onode);
return tree.find(
trans, hoid
- ).safe_then([this, &hoid, &trans, FNAME](auto cursor)
+ ).si_then([this, &hoid, &trans, FNAME](auto cursor)
-> get_onode_ret {
if (cursor == tree.end()) {
DEBUGT("no entry for {}", trans, hoid);
return crimson::ct_error::enoent::make();
}
auto val = OnodeRef(new FLTreeOnode(cursor.value()));
- return seastar::make_ready_future<OnodeRef>(
+ return get_onode_iertr::make_ready_future<OnodeRef>(
val
);
});
return tree.insert(
trans, hoid,
OnodeTree::tree_value_config_t{sizeof(onode_layout_t)}
- ).safe_then([&trans, &hoid, FNAME](auto p)
+ ).si_then([&trans, &hoid, FNAME](auto p)
-> get_or_create_onode_ret {
auto [cursor, created] = std::move(p);
auto val = OnodeRef(new FLTreeOnode(cursor.value()));
DEBUGT("created onode for entry for {}", trans, hoid);
val->get_mutable_layout(trans) = onode_layout_t{};
}
- return seastar::make_ready_future<OnodeRef>(
+ return get_or_create_onode_iertr::make_ready_future<OnodeRef>(
val
);
});
std::vector<OnodeRef>(),
[this, &hoids, &trans](auto &ret) {
ret.reserve(hoids.size());
- return crimson::do_for_each(
+ return trans_intr::do_for_each(
hoids,
[this, &trans, &ret](auto &hoid) {
return get_or_create_onode(trans, hoid
- ).safe_then([&ret](auto &&onoderef) {
+ ).si_then([&ret](auto &&onoderef) {
ret.push_back(std::move(onoderef));
});
- }).safe_then([&ret] {
+ }).si_then([&ret] {
return std::move(ret);
});
});
Transaction &trans,
const std::vector<OnodeRef> &onodes)
{
- return crimson::do_for_each(
+ return trans_intr::do_for_each(
onodes,
- [this, &trans](auto &onode) -> eagain_future<> {
+ [this, &trans](auto &onode) -> eagain_ifuture<> {
auto &flonode = static_cast<FLTreeOnode&>(*onode);
switch (flonode.status) {
case FLTreeOnode::status_t::MUTATED: {
flonode.populate_recorder(trans);
- return seastar::now();
+ return eagain_iertr::make_ready_future<>();
}
case FLTreeOnode::status_t::DELETED: {
return tree.erase(trans, flonode);
}
case FLTreeOnode::status_t::STABLE: {
- return seastar::now();
+ return eagain_iertr::make_ready_future<>();
}
default:
__builtin_unreachable();
{
auto &flonode = static_cast<FLTreeOnode&>(*onode);
flonode.mark_delete();
- return erase_onode_ertr::now();
+ return erase_onode_iertr::now();
}
FLTreeOnodeManager::list_onodes_ret FLTreeOnodeManager::list_onodes(
uint64_t limit)
{
return tree.lower_bound(trans, start
- ).safe_then([this, &trans, end, limit] (auto&& cursor) {
+ ).si_then([this, &trans, end, limit] (auto&& cursor) {
using crimson::os::seastore::onode::full_key_t;
return seastar::do_with(
limit,
std::move(cursor),
list_onodes_bare_ret(),
[this, &trans, end] (auto& to_list, auto& current_cursor, auto& ret) {
- return crimson::repeat(
- [this, &trans, end, &to_list, &current_cursor, &ret] () mutable
- -> eagain_future<seastar::stop_iteration> {
+ return trans_intr::repeat(
+ [this, &trans, end, &to_list, &current_cursor, &ret] ()
+ -> eagain_ifuture<seastar::stop_iteration> {
if (current_cursor.is_end() ||
current_cursor.get_ghobj() >= end) {
std::get<1>(ret) = end;
}
std::get<0>(ret).emplace_back(current_cursor.get_ghobj());
return tree.get_next(trans, current_cursor
- ).safe_then([&to_list, &current_cursor] (auto&& next_cursor) mutable {
+ ).si_then([&to_list, &current_cursor] (auto&& next_cursor) mutable {
// we intentionally hold the current_cursor during get_next() to
// accelerate tree lookup.
--to_list;
current_cursor = next_cursor;
- return seastar::stop_iteration::no;
+ return seastar::make_ready_future<seastar::stop_iteration>(
+ seastar::stop_iteration::no);
});
- }).safe_then([&ret] () mutable {
+ }).si_then([&ret] () mutable {
return seastar::make_ready_future<list_onodes_bare_ret>(
std::move(ret));
});
});
});
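// trans_intr::repeat takes the place of crimson::repeat for interruptible
// chains: each iteration resolves to seastar::stop_iteration, and the loop
// stops early with eagain once the transaction is invalidated. Note that the
// bare stop_iteration::no above is now returned through an explicit
// make_ready_future<stop_iteration>(). A minimal skeleton of the pattern
// (done and step() are hypothetical placeholders):
//
//   return trans_intr::repeat([&]() -> eagain_ifuture<seastar::stop_iteration> {
//     if (done) {
//       return seastar::make_ready_future<seastar::stop_iteration>(
//           seastar::stop_iteration::yes);
//     }
//     return step().si_then([] {
//       return seastar::make_ready_future<seastar::stop_iteration>(
//           seastar::stop_iteration::no);
//     });
//   });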
OnodeTree tree;
public:
- FLTreeOnodeManager(InterruptedTransactionManager tm) :
+ FLTreeOnodeManager(TransactionManager &tm) :
tree(NodeExtentManager::create_seastore(tm)) {}
mkfs_ret mkfs(Transaction &t) {
template <class ValueT=void>
using eagain_future = eagain_ertr::future<ValueT>;
+using eagain_iertr = trans_iertr<eagain_ertr>;
+template <class ValueT=void>
+using eagain_ifuture = eagain_iertr::future<ValueT>;
+
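// trans_iertr wraps an errorator into its transaction-interruptible
// counterpart, so eagain_iertr/eagain_ifuture mirror eagain_ertr/eagain_future.
// Continuations are chained with si_then() instead of safe_then(); once the
// backing transaction is invalidated, the remaining steps are skipped and the
// chain resolves to eagain, which is why the manual c.t.is_conflicted() checks
// go away elsewhere in this change. A minimal usage sketch, assuming only the
// aliases above and an interruptible context entered via with_trans_intr():
//
//   eagain_ifuture<int> fut = eagain_iertr::make_ready_future<int>(40
//   ).si_then([](int v) {
//     // runs only while the transaction is still valid; otherwise the
//     // chain short-circuits to eagain
//     return eagain_iertr::make_ready_future<int>(v + 2);
//   });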
using crimson::os::seastore::Transaction;
using crimson::os::seastore::TransactionRef;
using crimson::os::seastore::laddr_t;
}
}
-eagain_future<Ref<tree_cursor_t>>
+eagain_ifuture<Ref<tree_cursor_t>>
tree_cursor_t::get_next(context_t c)
{
assert(is_tracked());
}
template <bool FORCE_MERGE>
-eagain_future<Ref<tree_cursor_t>>
+eagain_ifuture<Ref<tree_cursor_t>>
tree_cursor_t::erase(context_t c, bool get_next)
{
assert(is_tracked());
return ref_leaf_node->erase<FORCE_MERGE>(c, position, get_next);
}
-template eagain_future<Ref<tree_cursor_t>>
+template eagain_ifuture<Ref<tree_cursor_t>>
tree_cursor_t::erase<true>(context_t, bool);
-template eagain_future<Ref<tree_cursor_t>>
+template eagain_ifuture<Ref<tree_cursor_t>>
tree_cursor_t::erase<false>(context_t, bool);
MatchKindCMP tree_cursor_t::compare_to(
return ret;
}
-eagain_future<>
+eagain_ifuture<>
tree_cursor_t::extend_value(context_t c, value_size_t extend_size)
{
assert(is_tracked());
return ref_leaf_node->extend_value(c, position, extend_size);
}
-eagain_future<>
+eagain_ifuture<>
tree_cursor_t::trim_value(context_t c, value_size_t trim_size)
{
assert(is_tracked());
return impl->level();
}
-eagain_future<Node::search_result_t> Node::lower_bound(
+eagain_ifuture<Node::search_result_t> Node::lower_bound(
context_t c, const key_hobj_t& key)
{
return seastar::do_with(
);
}
-eagain_future<std::pair<Ref<tree_cursor_t>, bool>> Node::insert(
+eagain_ifuture<std::pair<Ref<tree_cursor_t>, bool>> Node::insert(
context_t c,
const key_hobj_t& key,
value_config_t vconf,
MatchHistory(), [this, c, &key, vconf,
this_ref = std::move(this_ref)] (auto& history) mutable {
return lower_bound_tracked(c, key, history
- ).safe_then([c, &key, vconf, &history,
+ ).si_then([c, &key, vconf, &history,
this_ref = std::move(this_ref)] (auto result) mutable {
// the cursor in the result should already hold the root node upwards
this_ref.reset();
if (result.match() == MatchKindBS::EQ) {
- return eagain_ertr::make_ready_future<std::pair<Ref<tree_cursor_t>, bool>>(
+ return eagain_iertr::make_ready_future<std::pair<Ref<tree_cursor_t>, bool>>(
std::make_pair(result.p_cursor, false));
} else {
auto leaf_node = result.p_cursor->get_leaf_node();
return leaf_node->insert_value(
c, key, vconf, result.p_cursor->get_position(), history, result.mstat
- ).safe_then([](auto p_cursor) {
+ ).si_then([](auto p_cursor) {
return seastar::make_ready_future<std::pair<Ref<tree_cursor_t>, bool>>(
std::make_pair(p_cursor, true));
});
);
}
-eagain_future<std::size_t> Node::erase(
+eagain_ifuture<std::size_t> Node::erase(
context_t c,
const key_hobj_t& key,
Ref<Node>&& this_ref)
{
return lower_bound(c, key
- ).safe_then([c, this_ref = std::move(this_ref)] (auto result) mutable {
+ ).si_then([c, this_ref = std::move(this_ref)] (auto result) mutable {
// the cursor in the result should already hold the root node upwards
this_ref.reset();
if (result.match() != MatchKindBS::EQ) {
- return eagain_ertr::make_ready_future<std::size_t>(0);
+ return eagain_iertr::make_ready_future<std::size_t>(0);
}
auto ref_cursor = result.p_cursor;
return ref_cursor->erase(c, false
- ).safe_then([ref_cursor] (auto next_cursor) {
+ ).si_then([ref_cursor] (auto next_cursor) {
assert(ref_cursor->is_invalid());
assert(!next_cursor);
return std::size_t(1);
});
}
-eagain_future<tree_stats_t> Node::get_tree_stats(context_t c)
+eagain_ifuture<tree_stats_t> Node::get_tree_stats(context_t c)
{
return seastar::do_with(
tree_stats_t(), [this, c](auto& stats) {
- return do_get_tree_stats(c, stats).safe_then([&stats] {
+ return do_get_tree_stats(c, stats).si_then([&stats] {
return stats;
});
}
eagain_future<> Node::mkfs(context_t c, RootNodeTracker& root_tracker)
{
LOG_PREFIX(OTree::Node::mkfs);
- return LeafNode::allocate_root(c, root_tracker
+ return with_trans_intr(
+ c.t,
+ [c, &root_tracker](auto &t) {
+ return LeafNode::allocate_root(c, root_tracker);
+ }
).safe_then([c, FNAME](auto ret) {
INFOT("allocated root {}", c.t, ret->get_name());
});
}
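// mkfs() keeps its non-interruptible eagain_future<> signature and acts as the
// boundary into the interruptible world: with_trans_intr() runs the
// interruptible body against c.t and converts the resulting eagain_ifuture
// back into a plain errorated future, so the caller can keep chaining with
// safe_then(). A rough sketch of the same pattern (do_work() is a hypothetical
// stand-in for any eagain_ifuture<>-returning body):
//
//   eagain_future<> run(context_t c) {
//     return with_trans_intr(c.t, [c](auto &t) {
//       return do_work(c);   // interruptible body, eagain_ifuture<>
//     });                    // converted back to eagain_future<>
//   }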
-eagain_future<Ref<Node>> Node::load_root(context_t c, RootNodeTracker& root_tracker)
+eagain_ifuture<Ref<Node>> Node::load_root(context_t c, RootNodeTracker& root_tracker)
{
LOG_PREFIX(OTree::Node::load_root);
return c.nm.get_super(c.t, root_tracker
- ).handle_error(
- eagain_ertr::pass_further{},
+ ).handle_error_interruptible(
+ eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle([FNAME, c] {
ERRORT("EIO during get_super()", c.t);
ceph_abort("fatal error");
})
- ).safe_then([c, &root_tracker, FNAME](auto&& _super) {
+ ).si_then([c, &root_tracker, FNAME](auto&& _super) {
auto root_addr = _super->get_root_laddr();
assert(root_addr != L_ADDR_NULL);
TRACET("loading root_addr={:x} ...", c.t, root_addr);
return Node::load(c, root_addr, true
- ).safe_then([c, _super = std::move(_super),
+ ).si_then([c, _super = std::move(_super),
&root_tracker, FNAME](auto root) mutable {
TRACET("loaded {}", c.t, root->get_name());
assert(root->impl->field_type() == field_type_t::N0);
return ret;
}
-eagain_future<> Node::upgrade_root(context_t c)
+eagain_ifuture<> Node::upgrade_root(context_t c)
{
LOG_PREFIX(OTree::Node::upgrade_root);
assert(impl->field_type() == field_type_t::N0);
auto super_to_move = deref_super();
return InternalNode::allocate_root(
c, impl->level(), impl->laddr(), std::move(super_to_move)
- ).safe_then([this, c, FNAME](auto new_root) {
+ ).si_then([this, c, FNAME](auto new_root) {
as_child(search_position_t::end(), new_root);
INFOT("upgraded from {} to {}",
c.t, get_name(), new_root->get_name());
return parent_ref;
}
-eagain_future<> Node::apply_split_to_parent(
+eagain_ifuture<> Node::apply_split_to_parent(
context_t c,
Ref<Node>&& this_ref,
Ref<Node>&& split_right,
c, std::move(this_ref), std::move(split_right), update_right_index);
}
-eagain_future<Ref<tree_cursor_t>>
+eagain_ifuture<Ref<tree_cursor_t>>
Node::get_next_cursor_from_parent(context_t c)
{
assert(!impl->is_level_tail());
}
template <bool FORCE_MERGE>
-eagain_future<>
+eagain_ifuture<>
Node::try_merge_adjacent(
context_t c, bool update_parent_index, Ref<Node>&& this_ref)
{
return fix_parent_index(c, std::move(this_ref), false);
} else {
parent_info().ptr->validate_child_tracked(*this);
- return eagain_ertr::now();
+ return eagain_iertr::now();
}
}
}
return parent_info().ptr->get_child_peers(c, parent_info().position
- ).safe_then([c, this_ref = std::move(this_ref), this, FNAME,
- update_parent_index] (auto lr_nodes) mutable -> eagain_future<> {
+ ).si_then([c, this_ref = std::move(this_ref), this, FNAME,
+ update_parent_index] (auto lr_nodes) mutable -> eagain_ifuture<> {
auto& [lnode, rnode] = lr_nodes;
Ref<Node> left_for_merge;
Ref<Node> right_for_merge;
// fresh extent, thus no need to generate delta.
auto left_addr = left_for_merge->impl->laddr();
return left_for_merge->rebuild_extent(c
- ).safe_then([c, update_index_after_merge,
+ ).si_then([c, update_index_after_merge,
left_addr,
merge_stage = merge_stage,
merge_size = merge_size,
return fix_parent_index(c, std::move(*p_this_ref), false);
} else {
parent_info().ptr->validate_child_tracked(*this);
- return eagain_ertr::now();
+ return eagain_iertr::now();
}
// XXX: rebalance
});
}
-template eagain_future<> Node::try_merge_adjacent<true>(context_t, bool, Ref<Node>&&);
-template eagain_future<> Node::try_merge_adjacent<false>(context_t, bool, Ref<Node>&&);
+template eagain_ifuture<> Node::try_merge_adjacent<true>(context_t, bool, Ref<Node>&&);
+template eagain_ifuture<> Node::try_merge_adjacent<false>(context_t, bool, Ref<Node>&&);
-eagain_future<> Node::erase_node(context_t c, Ref<Node>&& this_ref)
+eagain_ifuture<> Node::erase_node(context_t c, Ref<Node>&& this_ref)
{
// To erase a node:
// 1. I'm supposed to have already untracked any children or cursors
}
template <bool FORCE_MERGE>
-eagain_future<> Node::fix_parent_index(
+eagain_ifuture<> Node::fix_parent_index(
context_t c, Ref<Node>&& this_ref, bool check_downgrade)
{
assert(!is_root());
return parent_info().ptr->fix_index<FORCE_MERGE>(
c, std::move(this_ref), check_downgrade);
}
-template eagain_future<> Node::fix_parent_index<true>(context_t, Ref<Node>&&, bool);
-template eagain_future<> Node::fix_parent_index<false>(context_t, Ref<Node>&&, bool);
+template eagain_ifuture<> Node::fix_parent_index<true>(context_t, Ref<Node>&&, bool);
+template eagain_ifuture<> Node::fix_parent_index<false>(context_t, Ref<Node>&&, bool);
-eagain_future<Ref<Node>> Node::load(
+eagain_ifuture<Ref<Node>> Node::load(
context_t c, laddr_t addr, bool expect_is_level_tail)
{
LOG_PREFIX(OTree::Node::load);
return c.nm.read_extent(c.t, addr
- ).handle_error(
- eagain_ertr::pass_further{},
+ ).handle_error_interruptible(
+ eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle(
[FNAME, c, addr, expect_is_level_tail] {
ERRORT("EIO -- addr={:x}, is_level_tail={}",
c.t, addr, expect_is_level_tail);
ceph_abort("fatal error");
})
- ).safe_then([FNAME, c, addr, expect_is_level_tail](auto extent)
- -> eagain_future<Ref<Node>> {
- if (c.t.is_conflicted()) {
- return crimson::ct_error::eagain::make();
- }
- assert(extent->is_valid());
+ ).si_then([FNAME, c, addr, expect_is_level_tail](auto extent)
+ -> eagain_ifuture<Ref<Node>> {
auto header = extent->get_header();
auto field_type = header.get_field_type();
if (!field_type) {
ceph_abort("fatal error");
}
auto impl = LeafNodeImpl::load(extent, *field_type);
- return eagain_ertr::make_ready_future<Ref<Node>>(
+ return eagain_iertr::make_ready_future<Ref<Node>>(
new LeafNode(impl.get(), std::move(impl)));
} else if (node_type == node_type_t::INTERNAL) {
if (extent->get_length() != c.vb.get_internal_node_size()) {
ceph_abort("fatal error");
}
auto impl = InternalNodeImpl::load(extent, *field_type);
- return eagain_ertr::make_ready_future<Ref<Node>>(
+ return eagain_iertr::make_ready_future<Ref<Node>>(
new InternalNode(impl.get(), std::move(impl)));
} else {
ceph_abort("impossible path");
});
}
-eagain_future<NodeExtentMutable> Node::rebuild_extent(context_t c)
+eagain_ifuture<NodeExtentMutable> Node::rebuild_extent(context_t c)
{
LOG_PREFIX(OTree::Node::rebuild_extent);
DEBUGT("{} ...", c.t, get_name());
return impl->rebuild_extent(c);
}
-eagain_future<> Node::retire(context_t c, Ref<Node>&& this_ref)
+eagain_ifuture<> Node::retire(context_t c, Ref<Node>&& this_ref)
{
LOG_PREFIX(OTree::Node::retire);
DEBUGT("{} ...", c.t, get_name());
assert(this_ref->use_count() == 1);
return impl->retire_extent(c
- ).safe_then([this_ref = std::move(this_ref)]{ /* deallocate node */});
+ ).si_then([this_ref = std::move(this_ref)]{ /* deallocate node */});
}
void Node::make_tail(context_t c)
InternalNode::InternalNode(InternalNodeImpl* impl, NodeImplURef&& impl_ref)
: Node(std::move(impl_ref)), impl{impl} {}
-eagain_future<Ref<tree_cursor_t>>
+eagain_ifuture<Ref<tree_cursor_t>>
InternalNode::get_next_cursor(context_t c, const search_position_t& pos)
{
impl->validate_non_empty();
}
assert(p_child_addr);
return get_or_track_child(c, next_pos, p_child_addr->value
- ).safe_then([c](auto child) {
+ ).si_then([c](auto child) {
return child->lookup_smallest(c);
});
}
}
-eagain_future<> InternalNode::apply_child_split(
+eagain_ifuture<> InternalNode::apply_child_split(
context_t c, Ref<Node>&& left_child, Ref<Node>&& right_child,
bool update_right_index)
{
return insert_or_split(
c, left_pos, left_key, left_child,
(update_right_index ? right_child : nullptr)
- ).safe_then([this, c,
- this_ref = std::move(this_ref)] (auto split_right) mutable {
+ ).si_then([this, c,
+ this_ref = std::move(this_ref)] (auto split_right) mutable {
if (split_right) {
// even if update_right_index could be true,
// we haven't fixed the right_child index of this node yet,
return apply_split_to_parent(
c, std::move(this_ref), std::move(split_right), false);
} else {
- return eagain_ertr::now();
+ return eagain_iertr::now();
}
- }).safe_then([c, update_right_index,
+ }).si_then([c, update_right_index,
right_child = std::move(right_child)] () mutable {
if (update_right_index) {
// XXX: might not need to call validate_tracked_children() in fix_index()
// there is no need to call try_merge_adjacent() because
// the filled size of the inserted node or the split right node
// won't be reduced if update_right_index is false.
- return eagain_ertr::now();
+ return eagain_iertr::now();
}
});
}
-eagain_future<> InternalNode::erase_child(context_t c, Ref<Node>&& child_ref)
+eagain_ifuture<> InternalNode::erase_child(context_t c, Ref<Node>&& child_ref)
{
LOG_PREFIX(OTree::InternalNode::erase_child);
// this is a special version of recursive merge
// and trigger prv_child_ref->try_merge_adjacent() at the end
bool fix_tail = (child_ref->parent_info().position.is_end() &&
!impl->is_keys_empty());
- return seastar::now().then([c, this, fix_tail] {
+ return eagain_iertr::now().si_then([c, this, fix_tail] {
if (fix_tail) {
search_position_t new_tail_pos;
const laddr_packed_t* new_tail_p_addr = nullptr;
impl->get_largest_slot(&new_tail_pos, nullptr, &new_tail_p_addr);
return get_or_track_child(c, new_tail_pos, new_tail_p_addr->value);
} else {
- return eagain_ertr::make_ready_future<Ref<Node>>();
+ return eagain_iertr::make_ready_future<Ref<Node>>();
}
- }).safe_then([c, this, child_ref = std::move(child_ref), FNAME]
+ }).si_then([c, this, child_ref = std::move(child_ref), FNAME]
(auto&& new_tail_child) mutable {
auto child_pos = child_ref->parent_info().position;
if (new_tail_child) {
Ref<Node> this_ref = child_ref->deref_parent();
assert(this_ref == this);
return child_ref->retire(c, std::move(child_ref)
- ).safe_then([c, this, child_pos, FNAME,
+ ).si_then([c, this, child_pos, FNAME,
this_ref = std::move(this_ref)] () mutable {
if (impl->has_single_value()) {
// fast path without mutating the extent
}
return try_merge_adjacent(c, update_parent_index, std::move(this_ref));
}
- }).safe_then([c, new_tail_child = std::move(new_tail_child)] () mutable {
+ }).si_then([c, new_tail_child = std::move(new_tail_child)] () mutable {
// finally, check if the new tail child needs to merge
if (new_tail_child && !new_tail_child->is_root()) {
assert(new_tail_child->impl->is_level_tail());
return new_tail_child->try_merge_adjacent(
c, false, std::move(new_tail_child));
} else {
- return eagain_ertr::now();
+ return eagain_iertr::now();
}
});
});
}
template <bool FORCE_MERGE>
-eagain_future<> InternalNode::fix_index(
+eagain_ifuture<> InternalNode::fix_index(
context_t c, Ref<Node>&& child, bool check_downgrade)
{
LOG_PREFIX(OTree::InternalNode::fix_index);
}
return insert_or_split(c, next_pos, new_key, child
- ).safe_then([this, c, update_parent_index, check_downgrade,
+ ).si_then([this, c, update_parent_index, check_downgrade,
this_ref = std::move(this_ref)] (auto split_right) mutable {
if (split_right) {
// after split, the parent index to the split_right will be incorrect
// no need to call try_downgrade_root() because the number of keys
// has not changed, and I must have at least 2 keys.
assert(!impl->is_keys_empty());
- return eagain_ertr::now();
+ return eagain_iertr::now();
}
} else {
// for non-root, maybe need merge adjacent or fix parent,
}
template <bool FORCE_MERGE>
-eagain_future<> InternalNode::apply_children_merge(
+eagain_ifuture<> InternalNode::apply_children_merge(
context_t c, Ref<Node>&& left_child, laddr_t origin_left_addr,
Ref<Node>&& right_child, bool update_index)
{
// All good to retire the right_child.
// I'm already ref-counted by left_child.
return right_child->retire(c, std::move(right_child)
- ).safe_then([c, this, update_index,
+ ).si_then([c, this, update_index,
left_child = std::move(left_child)] () mutable {
if (update_index) {
// I'm all good but:
}
});
}
-template eagain_future<> InternalNode::apply_children_merge<true>(
+template eagain_ifuture<> InternalNode::apply_children_merge<true>(
context_t, Ref<Node>&&, laddr_t, Ref<Node>&&, bool);
-template eagain_future<> InternalNode::apply_children_merge<false>(
+template eagain_ifuture<> InternalNode::apply_children_merge<false>(
context_t, Ref<Node>&&, laddr_t, Ref<Node>&&, bool);
-eagain_future<std::pair<Ref<Node>, Ref<Node>>> InternalNode::get_child_peers(
+eagain_ifuture<std::pair<Ref<Node>, Ref<Node>>> InternalNode::get_child_peers(
context_t c, const search_position_t& pos)
{
// assume I'm already ref counted by caller
}
}
- return seastar::now().then([this, c, prev_pos, prev_p_child_addr] {
+ return eagain_iertr::now().si_then([this, c, prev_pos, prev_p_child_addr] {
if (prev_p_child_addr != nullptr) {
return get_or_track_child(c, prev_pos, prev_p_child_addr->value);
} else {
- return eagain_ertr::make_ready_future<Ref<Node>>();
+ return eagain_iertr::make_ready_future<Ref<Node>>();
}
- }).safe_then([this, c, next_pos, next_p_child_addr] (Ref<Node> lnode) {
+ }).si_then([this, c, next_pos, next_p_child_addr] (Ref<Node> lnode) {
if (next_p_child_addr != nullptr) {
return get_or_track_child(c, next_pos, next_p_child_addr->value
- ).safe_then([lnode] (Ref<Node> rnode) {
+ ).si_then([lnode] (Ref<Node> rnode) {
return seastar::make_ready_future<std::pair<Ref<Node>, Ref<Node>>>(
lnode, rnode);
});
} else {
- return eagain_ertr::make_ready_future<std::pair<Ref<Node>, Ref<Node>>>(
+ return eagain_iertr::make_ready_future<std::pair<Ref<Node>, Ref<Node>>>(
lnode, nullptr);
}
});
}
-eagain_future<Ref<InternalNode>> InternalNode::allocate_root(
+eagain_ifuture<Ref<InternalNode>> InternalNode::allocate_root(
context_t c, level_t old_root_level,
laddr_t old_root_addr, Super::URef&& super)
{
// support tree height up to 256
ceph_assert(old_root_level < MAX_LEVEL);
return InternalNode::allocate(c, field_type_t::N0, true, old_root_level + 1
- ).safe_then([c, old_root_addr,
+ ).si_then([c, old_root_addr,
super = std::move(super)](auto fresh_node) mutable {
auto root = fresh_node.node;
assert(root->impl->is_keys_empty());
});
}
-eagain_future<Ref<tree_cursor_t>>
+eagain_ifuture<Ref<tree_cursor_t>>
InternalNode::lookup_smallest(context_t c)
{
impl->validate_non_empty();
const laddr_packed_t* p_child_addr;
impl->get_slot(position, nullptr, &p_child_addr);
return get_or_track_child(c, position, p_child_addr->value
- ).safe_then([c](auto child) {
+ ).si_then([c](auto child) {
return child->lookup_smallest(c);
});
}
-eagain_future<Ref<tree_cursor_t>>
+eagain_ifuture<Ref<tree_cursor_t>>
InternalNode::lookup_largest(context_t c)
{
// NOTE: unlike LeafNode::lookup_largest(), this only works for the tail
assert(impl->is_level_tail());
auto p_child_addr = impl->get_tail_value();
return get_or_track_child(c, search_position_t::end(), p_child_addr->value
- ).safe_then([c](auto child) {
+ ).si_then([c](auto child) {
return child->lookup_largest(c);
});
}
-eagain_future<Node::search_result_t>
+eagain_ifuture<Node::search_result_t>
InternalNode::lower_bound_tracked(
context_t c, const key_hobj_t& key, MatchHistory& history)
{
auto result = impl->lower_bound(key, history);
return get_or_track_child(c, result.position, result.p_value->value
- ).safe_then([c, &key, &history](auto child) {
+ ).si_then([c, &key, &history](auto child) {
// XXX(multi-type): pass result.mstat to child
return child->lower_bound_tracked(c, key, history);
});
}
-eagain_future<> InternalNode::do_get_tree_stats(
+eagain_ifuture<> InternalNode::do_get_tree_stats(
context_t c, tree_stats_t& stats)
{
impl->validate_non_empty();
[this, this_ref, c, &stats](auto& pos, auto& p_child_addr) {
pos = search_position_t::begin();
impl->get_slot(pos, nullptr, &p_child_addr);
- return crimson::repeat(
+ return trans_intr::repeat(
[this, this_ref, c, &stats, &pos, &p_child_addr]()
- -> eagain_future<seastar::stop_iteration> {
+ -> eagain_ifuture<seastar::stop_iteration> {
return get_or_track_child(c, pos, p_child_addr->value
- ).safe_then([c, &stats](auto child) {
+ ).si_then([c, &stats](auto child) {
return child->do_get_tree_stats(c, stats);
- }).safe_then([this, this_ref, &pos, &p_child_addr] {
+ }).si_then([this, this_ref, &pos, &p_child_addr] {
if (pos.is_end()) {
return seastar::stop_iteration::yes;
} else {
validate_tracked_children();
}
-eagain_future<> InternalNode::test_clone_root(
+eagain_ifuture<> InternalNode::test_clone_root(
context_t c_other, RootNodeTracker& tracker_other) const
{
assert(is_root());
assert(impl->field_type() == field_type_t::N0);
Ref<const Node> this_ref = this;
return InternalNode::allocate(c_other, field_type_t::N0, true, impl->level()
- ).safe_then([this, c_other, &tracker_other](auto fresh_other) {
+ ).si_then([this, c_other, &tracker_other](auto fresh_other) {
impl->test_copy_to(fresh_other.mut);
auto cloned_root = fresh_other.node;
return c_other.nm.get_super(c_other.t, tracker_other
- ).handle_error(
- eagain_ertr::pass_further{},
+ ).handle_error_interruptible(
+ eagain_iertr::pass_further{},
crimson::ct_error::assert_all{"Invalid error during test clone"}
- ).safe_then([c_other, cloned_root](auto&& super_other) {
+ ).si_then([c_other, cloned_root](auto&& super_other) {
cloned_root->make_root_new(c_other, std::move(super_other));
return cloned_root;
});
- }).safe_then([this_ref, this, c_other](auto cloned_root) {
+ }).si_then([this_ref, this, c_other](auto cloned_root) {
// clone tracked children
// In some unit tests, the children are stubbed out that they
// don't exist in NodeExtentManager, and are only tracked in memory.
- return crimson::do_for_each(
+ return trans_intr::do_for_each(
tracked_child_nodes.begin(),
tracked_child_nodes.end(),
[this_ref, c_other, cloned_root](auto& kv) {
});
}
-eagain_future<> InternalNode::try_downgrade_root(
+eagain_ifuture<> InternalNode::try_downgrade_root(
context_t c, Ref<Node>&& this_ref)
{
LOG_PREFIX(OTree::InternalNode::try_downgrade_root);
assert(impl->is_level_tail());
if (!impl->is_keys_empty()) {
// I have more than 1 values, no need to downgrade
- return eagain_ertr::now();
+ return eagain_iertr::now();
}
// proceed downgrade root to the only child
laddr_t child_addr = impl->get_tail_value()->value;
return get_or_track_child(c, search_position_t::end(), child_addr
- ).safe_then([c, this, FNAME,
+ ).si_then([c, this, FNAME,
this_ref = std::move(this_ref)] (auto child) mutable {
INFOT("downgrade {} to new root {}",
c.t, get_name(), child->get_name());
});
}
-eagain_future<Ref<InternalNode>> InternalNode::insert_or_split(
+eagain_ifuture<Ref<InternalNode>> InternalNode::insert_or_split(
context_t c,
const search_position_t& pos,
const key_view_t& insert_key,
validate_tracked_children();
}
- return eagain_ertr::make_ready_future<Ref<InternalNode>>(nullptr);
+ return eagain_iertr::make_ready_future<Ref<InternalNode>>(nullptr);
}
// proceed to split with insert
// assume I'm already ref-counted by caller
- return (is_root() ? upgrade_root(c) : eagain_ertr::now()
- ).safe_then([this, c] {
+ return (is_root() ? upgrade_root(c) : eagain_iertr::now()
+ ).si_then([this, c] {
return InternalNode::allocate(
c, impl->field_type(), impl->is_level_tail(), impl->level());
- }).safe_then([this, insert_key, insert_child, insert_pos,
+ }).si_then([this, insert_key, insert_child, insert_pos,
insert_stage=insert_stage, insert_size=insert_size,
outdated_child, c, FNAME](auto fresh_right) mutable {
// I'm the left_node and need to split into the right_node
});
}
-eagain_future<Ref<Node>> InternalNode::get_or_track_child(
+eagain_ifuture<Ref<Node>> InternalNode::get_or_track_child(
context_t c, const search_position_t& position, laddr_t child_addr)
{
LOG_PREFIX(OTree::InternalNode::get_or_track_child);
if (found != tracked_child_nodes.end()) {
TRACET("loaded child tracked {} at pos({}) addr={:x}",
c.t, found->second->get_name(), position, child_addr);
- return eagain_ertr::make_ready_future<Ref<Node>>(found->second);
+ return eagain_iertr::make_ready_future<Ref<Node>>(found->second);
}
// the child is not loaded yet
TRACET("loading child at pos({}) addr={:x} ...",
c.t, position, child_addr);
bool level_tail = position.is_end();
return Node::load(c, child_addr, level_tail
- ).safe_then([this, position, c, FNAME] (auto child) {
+ ).si_then([this, position, c, FNAME] (auto child) {
TRACET("loaded child untracked {}",
c.t, child->get_name());
if (child->level() + 1 != level()) {
child->as_child(position, this);
return child;
});
- }().safe_then([this_ref, this, position, child_addr] (auto child) {
+ }().si_then([this_ref, this, position, child_addr] (auto child) {
assert(child_addr == child->impl->laddr());
assert(position == child->parent_info().position);
std::ignore = position;
#endif
}
-eagain_future<InternalNode::fresh_node_t> InternalNode::allocate(
+eagain_ifuture<InternalNode::fresh_node_t> InternalNode::allocate(
context_t c, field_type_t field_type, bool is_level_tail, level_t level)
{
return InternalNodeImpl::allocate(c, field_type, is_level_tail, level
- ).safe_then([](auto&& fresh_impl) {
+ ).si_then([](auto&& fresh_impl) {
auto node = Ref<InternalNode>(new InternalNode(
fresh_impl.impl.get(), std::move(fresh_impl.impl)));
return fresh_node_t{node, fresh_impl.mut};
return {key_view, p_value_header};
}
-eagain_future<Ref<tree_cursor_t>>
+eagain_ifuture<Ref<tree_cursor_t>>
LeafNode::get_next_cursor(context_t c, const search_position_t& pos)
{
impl->validate_non_empty();
impl->get_next_slot(next_pos, &index_key, &p_value_header);
if (next_pos.is_end()) {
if (unlikely(is_level_tail())) {
- return eagain_ertr::make_ready_future<Ref<tree_cursor_t>>(
+ return eagain_iertr::make_ready_future<Ref<tree_cursor_t>>(
tree_cursor_t::create_end(this));
} else {
return get_next_cursor_from_parent(c);
}
} else {
- return eagain_ertr::make_ready_future<Ref<tree_cursor_t>>(
+ return eagain_iertr::make_ready_future<Ref<tree_cursor_t>>(
get_or_track_cursor(next_pos, index_key, p_value_header));
}
}
template <bool FORCE_MERGE>
-eagain_future<Ref<tree_cursor_t>>
+eagain_ifuture<Ref<tree_cursor_t>>
LeafNode::erase(context_t c, const search_position_t& pos, bool get_next)
{
LOG_PREFIX(OTree::LeafNode::erase);
c.t, get_name(), pos, get_next);
// get the next cursor
- return seastar::now().then([c, &pos, get_next, this] {
+ return eagain_iertr::now().si_then([c, &pos, get_next, this] {
if (get_next) {
return get_next_cursor(c, pos);
} else {
- return eagain_ertr::make_ready_future<Ref<tree_cursor_t>>();
+ return eagain_iertr::make_ready_future<Ref<tree_cursor_t>>();
}
- }).safe_then([c, &pos, this_ref = std::move(this_ref),
+ }).si_then([c, &pos, this_ref = std::move(this_ref),
this, FNAME] (Ref<tree_cursor_t> next_cursor) mutable {
if (next_cursor && next_cursor->is_end()) {
// reset the node reference from the end cursor
next_cursor.reset();
}
- return seastar::now().then(
+ return eagain_iertr::now().si_then(
[c, &pos, this_ref = std::move(this_ref), this, FNAME] () mutable {
assert_moveable(this_ref);
#ifndef NDEBUG
validate_tracked_cursors();
if (is_root()) {
- return eagain_ertr::now();
+ return eagain_iertr::now();
} else {
bool update_parent_index;
if (impl->is_level_tail()) {
return try_merge_adjacent<FORCE_MERGE>(
c, update_parent_index, std::move(this_ref));
}
- }).safe_then([next_cursor] {
+ }).si_then([next_cursor] {
return next_cursor;
});
});
}
-template eagain_future<Ref<tree_cursor_t>>
+template eagain_ifuture<Ref<tree_cursor_t>>
LeafNode::erase<true>(context_t, const search_position_t&, bool);
-template eagain_future<Ref<tree_cursor_t>>
+template eagain_ifuture<Ref<tree_cursor_t>>
LeafNode::erase<false>(context_t, const search_position_t&, bool);
-eagain_future<> LeafNode::extend_value(
+eagain_ifuture<> LeafNode::extend_value(
context_t c, const search_position_t& pos, value_size_t extend_size)
{
ceph_abort("not implemented");
- return seastar::now();
+ return eagain_iertr::now();
}
-eagain_future<> LeafNode::trim_value(
+eagain_ifuture<> LeafNode::trim_value(
context_t c, const search_position_t& pos, value_size_t trim_size)
{
ceph_abort("not implemented");
- return seastar::now();
+ return eagain_iertr::now();
}
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
return impl->prepare_mutate_value_payload(c);
}
-eagain_future<Ref<tree_cursor_t>>
+eagain_ifuture<Ref<tree_cursor_t>>
LeafNode::lookup_smallest(context_t)
{
if (unlikely(impl->is_keys_empty())) {
get_or_track_cursor(pos, index_key, p_value_header));
}
-eagain_future<Ref<tree_cursor_t>>
+eagain_ifuture<Ref<tree_cursor_t>>
LeafNode::lookup_largest(context_t)
{
if (unlikely(impl->is_keys_empty())) {
get_or_track_cursor(pos, index_key, p_value_header));
}
-eagain_future<Node::search_result_t>
+eagain_ifuture<Node::search_result_t>
LeafNode::lower_bound_tracked(
context_t c, const key_hobj_t& key, MatchHistory& history)
{
return seastar::make_ready_future<search_result_t>(ret);
}
-eagain_future<> LeafNode::do_get_tree_stats(context_t, tree_stats_t& stats)
+eagain_ifuture<> LeafNode::do_get_tree_stats(context_t, tree_stats_t& stats)
{
auto nstats = impl->get_stats();
stats.size_persistent_leaf += nstats.size_persistent;
stats.size_value_leaf += nstats.size_value;
stats.num_kvs_leaf += nstats.num_kvs;
stats.num_nodes_leaf += 1;
- return seastar::now();
+ return eagain_iertr::now();
}
void LeafNode::track_merge(
validate_tracked_cursors();
}
-eagain_future<> LeafNode::test_clone_root(
+eagain_ifuture<> LeafNode::test_clone_root(
context_t c_other, RootNodeTracker& tracker_other) const
{
assert(is_root());
assert(impl->field_type() == field_type_t::N0);
Ref<const Node> this_ref = this;
return LeafNode::allocate(c_other, field_type_t::N0, true
- ).safe_then([this, c_other, &tracker_other](auto fresh_other) {
+ ).si_then([this, c_other, &tracker_other](auto fresh_other) {
impl->test_copy_to(fresh_other.mut);
auto cloned_root = fresh_other.node;
return c_other.nm.get_super(c_other.t, tracker_other
- ).handle_error(
- eagain_ertr::pass_further{},
+ ).handle_error_interruptible(
+ eagain_iertr::pass_further{},
crimson::ct_error::assert_all{"Invalid error during test clone"}
- ).safe_then([c_other, cloned_root](auto&& super_other) {
+ ).si_then([c_other, cloned_root](auto&& super_other) {
cloned_root->make_root_new(c_other, std::move(super_other));
});
- }).safe_then([this_ref]{});
+ }).si_then([this_ref]{});
}
-eagain_future<Ref<tree_cursor_t>> LeafNode::insert_value(
+eagain_ifuture<Ref<tree_cursor_t>> LeafNode::insert_value(
context_t c, const key_hobj_t& key, value_config_t vconf,
const search_position_t& pos, const MatchHistory& history,
match_stat_t mstat)
assert(p_value_header->payload_size == vconf.payload_size);
auto ret = track_insert(insert_pos, insert_stage, p_value_header);
validate_tracked_cursors();
- return eagain_ertr::make_ready_future<Ref<tree_cursor_t>>(ret);
+ return eagain_iertr::make_ready_future<Ref<tree_cursor_t>>(ret);
}
// split and insert
Ref<Node> this_ref = this;
- return (is_root() ? upgrade_root(c) : eagain_ertr::now()
- ).safe_then([this, c] {
+ return (is_root() ? upgrade_root(c) : eagain_iertr::now()
+ ).si_then([this, c] {
return LeafNode::allocate(c, impl->field_type(), impl->is_level_tail());
- }).safe_then([this_ref = std::move(this_ref), this, c, &key, vconf, FNAME,
+ }).si_then([this_ref = std::move(this_ref), this, c, &key, vconf, FNAME,
insert_pos, insert_stage=insert_stage, insert_size=insert_size](auto fresh_right) mutable {
auto right_node = fresh_right.node;
INFOT("proceed split {} to fresh {} ...",
return apply_split_to_parent(
c, std::move(this_ref), std::move(right_node), false
- ).safe_then([ret] {
+ ).si_then([ret] {
return ret;
});
// TODO (optimize)
});
}
-eagain_future<Ref<LeafNode>> LeafNode::allocate_root(
+eagain_ifuture<Ref<LeafNode>> LeafNode::allocate_root(
context_t c, RootNodeTracker& root_tracker)
{
LOG_PREFIX(OTree::LeafNode::allocate_root);
return LeafNode::allocate(c, field_type_t::N0, true
- ).safe_then([c, &root_tracker, FNAME](auto fresh_node) {
+ ).si_then([c, &root_tracker, FNAME](auto fresh_node) {
auto root = fresh_node.node;
return c.nm.get_super(c.t, root_tracker
- ).handle_error(
- eagain_ertr::pass_further{},
+ ).handle_error_interruptible(
+ eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle([FNAME, c] {
ERRORT("EIO during get_super()", c.t);
ceph_abort("fatal error");
})
- ).safe_then([c, root](auto&& super) {
+ ).si_then([c, root](auto&& super) {
root->make_root_new(c, std::move(super));
return root;
});
}
}
-eagain_future<LeafNode::fresh_node_t> LeafNode::allocate(
+eagain_ifuture<LeafNode::fresh_node_t> LeafNode::allocate(
context_t c, field_type_t field_type, bool is_level_tail)
{
return LeafNodeImpl::allocate(c, field_type, is_level_tail
- ).safe_then([](auto&& fresh_impl) {
+ ).si_then([](auto&& fresh_impl) {
auto node = Ref<LeafNode>(new LeafNode(
fresh_impl.impl.get(), std::move(fresh_impl.impl)));
return fresh_node_t{node, fresh_impl.mut};
}
/// Returns the next tree_cursor_t in tree, can be end if there's no next.
- eagain_future<Ref<tree_cursor_t>> get_next(context_t);
+ eagain_ifuture<Ref<tree_cursor_t>> get_next(context_t);
/// Check that this is next to prv
void assert_next_to(const tree_cursor_t&, value_magic_t) const;
/// Erases the key-value pair from tree.
template <bool FORCE_MERGE = false>
- eagain_future<Ref<tree_cursor_t>> erase(context_t, bool get_next);
+ eagain_ifuture<Ref<tree_cursor_t>> erase(context_t, bool get_next);
MatchKindCMP compare_to(const tree_cursor_t&, value_magic_t) const;
}
/// Extends the size of value payload.
- eagain_future<> extend_value(context_t, value_size_t);
+ eagain_ifuture<> extend_value(context_t, value_size_t);
/// Trim and shrink the value payload.
- eagain_future<> trim_value(context_t, value_size_t);
+ eagain_ifuture<> trim_value(context_t, value_size_t);
static Ref<tree_cursor_t> get_invalid() {
static Ref<tree_cursor_t> INVALID = new tree_cursor_t();
*
* Returns an end cursor if it is an empty root node.
*/
- virtual eagain_future<Ref<tree_cursor_t>> lookup_smallest(context_t) = 0;
+ virtual eagain_ifuture<Ref<tree_cursor_t>> lookup_smallest(context_t) = 0;
/**
* lookup_largest
*
* Returns an end cursor if it is an empty root node.
*/
- virtual eagain_future<Ref<tree_cursor_t>> lookup_largest(context_t) = 0;
+ virtual eagain_ifuture<Ref<tree_cursor_t>> lookup_largest(context_t) = 0;
/**
* lower_bound
* - It is an empty root node;
* - Or the input key is larger than all the keys in the sub-tree;
*/
- eagain_future<search_result_t> lower_bound(context_t c, const key_hobj_t& key);
+ eagain_ifuture<search_result_t> lower_bound(context_t c, const key_hobj_t& key);
/**
* insert
* - If true, the returned cursor points to the inserted element in tree;
* - If false, the returned cursor points to the conflicting element in tree;
*/
- eagain_future<std::pair<Ref<tree_cursor_t>, bool>> insert(
+ eagain_ifuture<std::pair<Ref<tree_cursor_t>, bool>> insert(
context_t, const key_hobj_t&, value_config_t, Ref<Node>&&);
/**
*
* Returns the number of erased key-value pairs (0 or 1).
*/
- eagain_future<std::size_t> erase(context_t, const key_hobj_t&, Ref<Node>&&);
+ eagain_ifuture<std::size_t> erase(context_t, const key_hobj_t&, Ref<Node>&&);
/// Recursively collects the statistics of the sub-tree formed by this node
- eagain_future<tree_stats_t> get_tree_stats(context_t);
+ eagain_ifuture<tree_stats_t> get_tree_stats(context_t);
/// Returns an ostream containing a dump of all the elements in the node.
std::ostream& dump(std::ostream&) const;
static eagain_future<> mkfs(context_t, RootNodeTracker&);
/// Loads the tree root. The tree must be initialized.
- static eagain_future<Ref<Node>> load_root(context_t, RootNodeTracker&);
+ static eagain_ifuture<Ref<Node>> load_root(context_t, RootNodeTracker&);
// Only for unit test purposes.
void test_make_destructable(context_t, NodeExtentMutable&, Super::URef&&);
- virtual eagain_future<> test_clone_root(context_t, RootNodeTracker&) const = 0;
+ virtual eagain_ifuture<> test_clone_root(context_t, RootNodeTracker&) const = 0;
protected:
- virtual eagain_future<> test_clone_non_root(context_t, Ref<InternalNode>) const {
+ virtual eagain_ifuture<> test_clone_non_root(context_t, Ref<InternalNode>) const {
ceph_abort("impossible path");
}
- virtual eagain_future<search_result_t> lower_bound_tracked(
+ virtual eagain_ifuture<search_result_t> lower_bound_tracked(
context_t, const key_hobj_t&, MatchHistory&) = 0;
- virtual eagain_future<> do_get_tree_stats(context_t, tree_stats_t&) = 0;
+ virtual eagain_ifuture<> do_get_tree_stats(context_t, tree_stats_t&) = 0;
virtual bool is_tracking() const = 0;
make_root(c, std::move(_super));
}
void as_root(Super::URef&& _super);
- eagain_future<> upgrade_root(context_t);
+ eagain_ifuture<> upgrade_root(context_t);
Super::URef deref_super();
Ref<InternalNode> deref_parent();
- eagain_future<> apply_split_to_parent(context_t, Ref<Node>&&, Ref<Node>&&, bool);
- eagain_future<Ref<tree_cursor_t>> get_next_cursor_from_parent(context_t);
+ eagain_ifuture<> apply_split_to_parent(context_t, Ref<Node>&&, Ref<Node>&&, bool);
+ eagain_ifuture<Ref<tree_cursor_t>> get_next_cursor_from_parent(context_t);
template <bool FORCE_MERGE = false>
- eagain_future<> try_merge_adjacent(context_t, bool, Ref<Node>&&);
- eagain_future<> erase_node(context_t, Ref<Node>&&);
+ eagain_ifuture<> try_merge_adjacent(context_t, bool, Ref<Node>&&);
+ eagain_ifuture<> erase_node(context_t, Ref<Node>&&);
template <bool FORCE_MERGE = false>
- eagain_future<> fix_parent_index(context_t, Ref<Node>&&, bool);
- eagain_future<NodeExtentMutable> rebuild_extent(context_t);
- eagain_future<> retire(context_t, Ref<Node>&&);
+ eagain_ifuture<> fix_parent_index(context_t, Ref<Node>&&, bool);
+ eagain_ifuture<NodeExtentMutable> rebuild_extent(context_t);
+ eagain_ifuture<> retire(context_t, Ref<Node>&&);
void make_tail(context_t);
private:
std::optional<parent_info_t> _parent_info;
private:
- static eagain_future<Ref<Node>> load(context_t, laddr_t, bool expect_is_level_tail);
+ static eagain_ifuture<Ref<Node>> load(context_t, laddr_t, bool expect_is_level_tail);
NodeImplURef impl;
friend class InternalNode;
InternalNode& operator=(const InternalNode&) = delete;
InternalNode& operator=(InternalNode&&) = delete;
- eagain_future<Ref<tree_cursor_t>> get_next_cursor(context_t, const search_position_t&);
+ eagain_ifuture<Ref<tree_cursor_t>> get_next_cursor(context_t, const search_position_t&);
- eagain_future<> apply_child_split(context_t, Ref<Node>&& left, Ref<Node>&& right, bool);
+ eagain_ifuture<> apply_child_split(context_t, Ref<Node>&& left, Ref<Node>&& right, bool);
template <bool VALIDATE>
void do_track_child(Node& child) {
}
}
- eagain_future<std::pair<Ref<Node>, Ref<Node>>> get_child_peers(
+ eagain_ifuture<std::pair<Ref<Node>, Ref<Node>>> get_child_peers(
context_t, const search_position_t&);
- eagain_future<> erase_child(context_t, Ref<Node>&&);
+ eagain_ifuture<> erase_child(context_t, Ref<Node>&&);
template <bool FORCE_MERGE = false>
- eagain_future<> fix_index(context_t, Ref<Node>&&, bool);
+ eagain_ifuture<> fix_index(context_t, Ref<Node>&&, bool);
template <bool FORCE_MERGE = false>
- eagain_future<> apply_children_merge(
+ eagain_ifuture<> apply_children_merge(
context_t, Ref<Node>&& left, laddr_t, Ref<Node>&& right, bool update_index);
void validate_child_tracked(const Node& child) const {
void track_make_tail(const search_position_t&);
- static eagain_future<Ref<InternalNode>> allocate_root(
+ static eagain_ifuture<Ref<InternalNode>> allocate_root(
context_t, level_t, laddr_t, Super::URef&&);
protected:
- eagain_future<Ref<tree_cursor_t>> lookup_smallest(context_t) override;
- eagain_future<Ref<tree_cursor_t>> lookup_largest(context_t) override;
- eagain_future<search_result_t> lower_bound_tracked(
+ eagain_ifuture<Ref<tree_cursor_t>> lookup_smallest(context_t) override;
+ eagain_ifuture<Ref<tree_cursor_t>> lookup_largest(context_t) override;
+ eagain_ifuture<search_result_t> lower_bound_tracked(
context_t, const key_hobj_t&, MatchHistory&) override;
- eagain_future<> do_get_tree_stats(context_t, tree_stats_t&) override;
+ eagain_ifuture<> do_get_tree_stats(context_t, tree_stats_t&) override;
bool is_tracking() const override {
return !tracked_child_nodes.empty();
}
void track_merge(Ref<Node>, match_stage_t, search_position_t&) override;
- eagain_future<> test_clone_root(context_t, RootNodeTracker&) const override;
+ eagain_ifuture<> test_clone_root(context_t, RootNodeTracker&) const override;
private:
- eagain_future<> try_downgrade_root(context_t, Ref<Node>&&);
+ eagain_ifuture<> try_downgrade_root(context_t, Ref<Node>&&);
- eagain_future<Ref<InternalNode>> insert_or_split(
+ eagain_ifuture<Ref<InternalNode>> insert_or_split(
context_t, const search_position_t&, const key_view_t&, Ref<Node>,
Ref<Node> outdated_child=nullptr);
// XXX: extract a common tracker for InternalNode to track Node,
// and LeafNode to track tree_cursor_t.
- eagain_future<Ref<Node>> get_or_track_child(context_t, const search_position_t&, laddr_t);
+ eagain_ifuture<Ref<Node>> get_or_track_child(context_t, const search_position_t&, laddr_t);
template <bool VALIDATE = true>
void track_insert(
const search_position_t&, match_stage_t, Ref<Node>, Ref<Node> nxt_child = nullptr);
return std::make_pair(Ref<Node>(node), mut);
}
};
- static eagain_future<fresh_node_t> allocate(context_t, field_type_t, bool, level_t);
+ static eagain_ifuture<fresh_node_t> allocate(context_t, field_type_t, bool, level_t);
private:
/**
const char* read() const;
extent_len_t get_node_size() const;
std::tuple<key_view_t, const value_header_t*> get_kv(const search_position_t&) const;
- eagain_future<Ref<tree_cursor_t>> get_next_cursor(context_t, const search_position_t&);
+ eagain_ifuture<Ref<tree_cursor_t>> get_next_cursor(context_t, const search_position_t&);
/**
* erase
* pair that followed the erased element, which can be nullptr if is end.
*/
template <bool FORCE_MERGE>
- eagain_future<Ref<tree_cursor_t>> erase(
+ eagain_ifuture<Ref<tree_cursor_t>> erase(
context_t, const search_position_t&, bool get_next);
template <bool VALIDATE>
}
}
- eagain_future<> extend_value(context_t, const search_position_t&, value_size_t);
- eagain_future<> trim_value(context_t, const search_position_t&, value_size_t);
+ eagain_ifuture<> extend_value(context_t, const search_position_t&, value_size_t);
+ eagain_ifuture<> trim_value(context_t, const search_position_t&, value_size_t);
std::pair<NodeExtentMutable&, ValueDeltaRecorder*>
prepare_mutate_value_payload(context_t);
protected:
- eagain_future<Ref<tree_cursor_t>> lookup_smallest(context_t) override;
- eagain_future<Ref<tree_cursor_t>> lookup_largest(context_t) override;
- eagain_future<search_result_t> lower_bound_tracked(
+ eagain_ifuture<Ref<tree_cursor_t>> lookup_smallest(context_t) override;
+ eagain_ifuture<Ref<tree_cursor_t>> lookup_largest(context_t) override;
+ eagain_ifuture<search_result_t> lower_bound_tracked(
context_t, const key_hobj_t&, MatchHistory&) override;
- eagain_future<> do_get_tree_stats(context_t, tree_stats_t&) override;
+ eagain_ifuture<> do_get_tree_stats(context_t, tree_stats_t&) override;
bool is_tracking() const override {
return !tracked_cursors.empty();
}
void track_merge(Ref<Node>, match_stage_t, search_position_t&) override;
- eagain_future<> test_clone_root(context_t, RootNodeTracker&) const override;
+ eagain_ifuture<> test_clone_root(context_t, RootNodeTracker&) const override;
private:
LeafNode(LeafNodeImpl*, NodeImplURef&&);
- eagain_future<Ref<tree_cursor_t>> insert_value(
+ eagain_ifuture<Ref<tree_cursor_t>> insert_value(
context_t, const key_hobj_t&, value_config_t,
const search_position_t&, const MatchHistory&,
match_stat_t mstat);
- static eagain_future<Ref<LeafNode>> allocate_root(context_t, RootNodeTracker&);
+ static eagain_ifuture<Ref<LeafNode>> allocate_root(context_t, RootNodeTracker&);
friend class Node;
private:
return std::make_pair(Ref<Node>(node), mut);
}
};
- static eagain_future<fresh_node_t> allocate(context_t, field_type_t, bool);
+ static eagain_ifuture<fresh_node_t> allocate(context_t, field_type_t, bool);
private:
/**
std::memcpy(to.get_write(), extent->get_read(), get_length());
}
- eagain_future<NodeExtentMutable> rebuild(context_t c) {
+ eagain_ifuture<NodeExtentMutable> rebuild(context_t c) {
LOG_PREFIX(OTree::Extent::rebuild);
assert(!is_retired());
if (state == nextent_state_t::FRESH) {
assert(extent->is_initial_pending());
// already fresh and no need to record
- return eagain_ertr::make_ready_future<NodeExtentMutable>(*mut);
+ return eagain_iertr::make_ready_future<NodeExtentMutable>(*mut);
}
assert(!extent->is_initial_pending());
auto alloc_size = get_length();
return c.nm.alloc_extent(c.t, alloc_size
- ).handle_error(
- eagain_ertr::pass_further{},
+ ).handle_error_interruptible(
+ eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle(
[FNAME, c, alloc_size, l_to_discard = extent->get_laddr()] {
ERRORT("EIO during allocate -- node_size={}, to_discard={:x}",
c.t, alloc_size, l_to_discard);
ceph_abort("fatal error");
})
- ).safe_then([this, c, FNAME] (auto fresh_extent) {
+ ).si_then([this, c, FNAME] (auto fresh_extent) {
DEBUGT("update addr from {:#x} to {:#x} ...",
c.t, extent->get_laddr(), fresh_extent->get_laddr());
assert(fresh_extent->is_initial_pending());
recorder = nullptr;
return c.nm.retire_extent(c.t, to_discard
- ).handle_error(
- eagain_ertr::pass_further{},
+ ).handle_error_interruptible(
+ eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle(
[FNAME, c, l_to_discard = to_discard->get_laddr(),
l_fresh = fresh_extent->get_laddr()] {
ceph_abort("fatal error");
})
);
- }).safe_then([this] {
+ }).si_then([this] {
return *mut;
});
}
- eagain_future<> retire(context_t c) {
+ eagain_ifuture<> retire(context_t c) {
LOG_PREFIX(OTree::Extent::retire);
assert(!is_retired());
auto addr = extent->get_laddr();
return c.nm.retire_extent(c.t, std::move(extent)
- ).handle_error(
- eagain_ertr::pass_further{},
+ ).handle_error_interruptible(
+ eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle(
[FNAME, c, addr] {
ERRORT("EIO -- addr={:x}", c.t, addr);
}
NodeExtentManagerURef NodeExtentManager::create_seastore(
- InterruptedTransactionManager tm, laddr_t min_laddr, double p_eagain)
+ TransactionManager &tm, laddr_t min_laddr, double p_eagain)
{
if (p_eagain == 0.0) {
return NodeExtentManagerURef(
using base_ertr = eagain_ertr::extend<
crimson::ct_error::input_output_error>;
+ using eagain_iertr = trans_iertr<eagain_ertr>;
+ using base_iertr = eagain_iertr::extend<
+ crimson::ct_error::input_output_error>;
public:
virtual ~NodeExtentManager() = default;
virtual bool is_read_isolated() const = 0;
- using read_ertr = base_ertr::extend<
+ using read_iertr = base_iertr::extend<
crimson::ct_error::invarg,
crimson::ct_error::enoent,
crimson::ct_error::erange>;
- virtual read_ertr::future<NodeExtentRef> read_extent(
+ virtual read_iertr::future<NodeExtentRef> read_extent(
Transaction&, laddr_t) = 0;
- using alloc_ertr = base_ertr;
- virtual alloc_ertr::future<NodeExtentRef> alloc_extent(
+ using alloc_iertr = base_iertr;
+ virtual alloc_iertr::future<NodeExtentRef> alloc_extent(
Transaction&, extent_len_t) = 0;
- using retire_ertr = base_ertr::extend<
+ using retire_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
- virtual retire_ertr::future<> retire_extent(
+ virtual retire_iertr::future<> retire_extent(
Transaction&, NodeExtentRef) = 0;
- using getsuper_ertr = base_ertr;
- virtual getsuper_ertr::future<Super::URef> get_super(
+ using getsuper_iertr = base_iertr;
+ virtual getsuper_iertr::future<Super::URef> get_super(
Transaction&, RootNodeTracker&) = 0;
virtual std::ostream& print(std::ostream& os) const = 0;
static NodeExtentManagerURef create_dummy(bool is_sync);
static NodeExtentManagerURef create_seastore(
- InterruptedTransactionManager tm, laddr_t min_laddr = L_ADDR_MIN, double p_eagain = 0.0);
+ TransactionManager &tm, laddr_t min_laddr = L_ADDR_MIN, double p_eagain = 0.0);
};
inline std::ostream& operator<<(std::ostream& os, const NodeExtentManager& nm) {
return nm.print(os);
protected:
bool is_read_isolated() const override { return false; }
- read_ertr::future<NodeExtentRef> read_extent(
+ read_iertr::future<NodeExtentRef> read_extent(
Transaction& t, laddr_t addr) override {
TRACET("reading at {:#x} ...", t, addr);
if constexpr (SYNC) {
}
}
- alloc_ertr::future<NodeExtentRef> alloc_extent(
+ alloc_iertr::future<NodeExtentRef> alloc_extent(
Transaction& t, extent_len_t len) override {
TRACET("allocating {}B ...", t, len);
if constexpr (SYNC) {
}
}
- retire_ertr::future<> retire_extent(
+ retire_iertr::future<> retire_extent(
Transaction& t, NodeExtentRef extent) override {
TRACET("retiring {}B at {:#x} -- {} ...",
t, extent->get_length(), extent->get_laddr(), *extent);
}
}
- getsuper_ertr::future<Super::URef> get_super(
+ getsuper_iertr::future<Super::URef> get_super(
Transaction& t, RootNodeTracker& tracker) override {
TRACET("get root ...", t);
if constexpr (SYNC) {
}
private:
- read_ertr::future<NodeExtentRef> read_extent_sync(
+ read_iertr::future<NodeExtentRef> read_extent_sync(
Transaction& t, laddr_t addr) {
auto iter = allocate_map.find(addr);
assert(iter != allocate_map.end());
TRACET("read {}B at {:#x} -- {}",
t, extent->get_length(), extent->get_laddr(), *extent);
assert(extent->get_laddr() == addr);
- return read_ertr::make_ready_future<NodeExtentRef>(extent);
+ return read_iertr::make_ready_future<NodeExtentRef>(extent);
}
- alloc_ertr::future<NodeExtentRef> alloc_extent_sync(
+ alloc_iertr::future<NodeExtentRef> alloc_extent_sync(
Transaction& t, extent_len_t len) {
assert(len % ALIGNMENT == 0);
auto r = ceph::buffer::create_aligned(len, ALIGNMENT);
DEBUGT("allocated {}B at {:#x} -- {}",
t, extent->get_length(), extent->get_laddr(), *extent);
assert(extent->get_length() == len);
- return alloc_ertr::make_ready_future<NodeExtentRef>(extent);
+ return alloc_iertr::make_ready_future<NodeExtentRef>(extent);
}
- retire_ertr::future<> retire_extent_sync(
+ retire_iertr::future<> retire_extent_sync(
Transaction& t, NodeExtentRef _extent) {
auto& extent = static_cast<DummyNodeExtent&>(*_extent.get());
auto addr = extent.get_laddr();
assert(iter != allocate_map.end());
allocate_map.erase(iter);
DEBUGT("retired {}B at {:#x}", t, len, addr);
- return retire_ertr::now();
+ return retire_iertr::now();
}
- getsuper_ertr::future<Super::URef> get_super_sync(
+ getsuper_iertr::future<Super::URef> get_super_sync(
Transaction& t, RootNodeTracker& tracker) {
TRACET("got root {:#x}", t, root_laddr);
- return getsuper_ertr::make_ready_future<Super::URef>(
+ return getsuper_iertr::make_ready_future<Super::URef>(
Super::URef(new DummySuper(t, tracker, &root_laddr)));
}
class SeastoreSuper final: public Super {
public:
SeastoreSuper(Transaction& t, RootNodeTracker& tracker,
- laddr_t root_addr, InterruptedTransactionManager& tm)
+ laddr_t root_addr, TransactionManager& tm)
: Super(t, tracker), root_addr{root_addr}, tm{tm} {}
~SeastoreSuper() override = default;
protected:
}
private:
laddr_t root_addr;
- InterruptedTransactionManager tm;
+ TransactionManager &tm;
};
class SeastoreNodeExtent final: public NodeExtent {
class TransactionManagerHandle : public NodeExtentManager {
public:
- TransactionManagerHandle(InterruptedTransactionManager tm) : tm{tm} {}
- InterruptedTransactionManager tm;
+ TransactionManagerHandle(TransactionManager &tm) : tm{tm} {}
+ TransactionManager &tm;
};
template <bool INJECT_EAGAIN=false>
class SeastoreNodeExtentManager final: public TransactionManagerHandle {
public:
SeastoreNodeExtentManager(
- InterruptedTransactionManager tm, laddr_t min, double p_eagain)
+ TransactionManager &tm, laddr_t min, double p_eagain)
: TransactionManagerHandle(tm), addr_min{min}, p_eagain{p_eagain} {
if constexpr (INJECT_EAGAIN) {
assert(p_eagain > 0.0 && p_eagain < 1.0);
protected:
bool is_read_isolated() const override { return true; }
- read_ertr::future<NodeExtentRef> read_extent(
+ read_iertr::future<NodeExtentRef> read_extent(
Transaction& t, laddr_t addr) override {
TRACET("reading at {:#x} ...", t, addr);
if constexpr (INJECT_EAGAIN) {
}
}
return tm.read_extent<SeastoreNodeExtent>(t, addr
- ).safe_then([addr, &t](auto&& e) -> read_ertr::future<NodeExtentRef> {
+ ).si_then([addr, &t](auto&& e) -> read_iertr::future<NodeExtentRef> {
TRACET("read {}B at {:#x} -- {}",
t, e->get_length(), e->get_laddr(), *e);
if (t.is_conflicted()) {
assert(e->is_valid());
assert(e->get_laddr() == addr);
std::ignore = addr;
- return read_ertr::make_ready_future<NodeExtentRef>(e);
+ return read_iertr::make_ready_future<NodeExtentRef>(e);
});
}
- alloc_ertr::future<NodeExtentRef> alloc_extent(
+ alloc_iertr::future<NodeExtentRef> alloc_extent(
Transaction& t, extent_len_t len) override {
TRACET("allocating {}B ...", t, len);
if constexpr (INJECT_EAGAIN) {
}
}
return tm.alloc_extent<SeastoreNodeExtent>(t, addr_min, len
- ).safe_then([len, &t](auto extent) {
+ ).si_then([len, &t](auto extent) {
DEBUGT("allocated {}B at {:#x} -- {}",
t, extent->get_length(), extent->get_laddr(), *extent);
if (!extent->is_initial_pending()) {
});
}
- retire_ertr::future<> retire_extent(
+ retire_iertr::future<> retire_extent(
Transaction& t, NodeExtentRef _extent) override {
LogicalCachedExtentRef extent = _extent;
auto addr = extent->get_laddr();
return crimson::ct_error::eagain::make();
}
}
- return tm.dec_ref(t, extent).safe_then([addr, len, &t] (unsigned cnt) {
+ return tm.dec_ref(t, extent).si_then([addr, len, &t] (unsigned cnt) {
assert(cnt == 0);
TRACET("retired {}B at {:#x} ...", t, len, addr);
});
}
- getsuper_ertr::future<Super::URef> get_super(
+ getsuper_iertr::future<Super::URef> get_super(
Transaction& t, RootNodeTracker& tracker) override {
TRACET("get root ...", t);
if constexpr (INJECT_EAGAIN) {
return crimson::ct_error::eagain::make();
}
}
- return tm.read_onode_root(t).safe_then([this, &t, &tracker](auto root_addr) {
+ return tm.read_onode_root(t).si_then([this, &t, &tracker](auto root_addr) {
TRACET("got root {:#x}", t, root_addr);
return Super::URef(new SeastoreSuper(t, tracker, root_addr, tm));
});
#endif
// XXX: branchless allocation
-eagain_future<InternalNodeImpl::fresh_impl_t>
+eagain_ifuture<InternalNodeImpl::fresh_impl_t>
InternalNodeImpl::allocate(
context_t c, field_type_t type, bool is_level_tail, level_t level)
{
}
}
-eagain_future<LeafNodeImpl::fresh_impl_t>
+eagain_ifuture<LeafNodeImpl::fresh_impl_t>
LeafNodeImpl::allocate(
context_t c, field_type_t type, bool is_level_tail)
{
virtual std::tuple<match_stage_t, search_position_t> erase(const search_position_t&) = 0;
virtual std::tuple<match_stage_t, std::size_t> evaluate_merge(NodeImpl&) = 0;
virtual search_position_t merge(NodeExtentMutable&, NodeImpl&, match_stage_t, extent_len_t) = 0;
- virtual eagain_future<NodeExtentMutable> rebuild_extent(context_t) = 0;
- virtual eagain_future<> retire_extent(context_t) = 0;
+ virtual eagain_ifuture<NodeExtentMutable> rebuild_extent(context_t) = 0;
+ virtual eagain_ifuture<> retire_extent(context_t) = 0;
virtual search_position_t make_tail() = 0;
virtual node_stats_t get_stats() const = 0;
return {std::move(impl), mut};
}
};
- static eagain_future<fresh_impl_t> allocate(context_t, field_type_t, bool, level_t);
+ static eagain_ifuture<fresh_impl_t> allocate(context_t, field_type_t, bool, level_t);
static InternalNodeImplURef load(NodeExtentRef, field_type_t);
return {std::move(impl), mut};
}
};
- static eagain_future<fresh_impl_t> allocate(context_t, field_type_t, bool);
+ static eagain_ifuture<fresh_impl_t> allocate(context_t, field_type_t, bool);
static LeafNodeImplURef load(NodeExtentRef, field_type_t);
return ret;
}
- static eagain_future<typename parent_t::fresh_impl_t> allocate(
+ static eagain_ifuture<typename parent_t::fresh_impl_t> allocate(
context_t c, bool is_level_tail, level_t level) {
LOG_PREFIX(OTree::Layout::allocate);
extent_len_t extent_size;
extent_size = c.vb.get_internal_node_size();
}
return c.nm.alloc_extent(c.t, extent_size
- ).handle_error(
- eagain_ertr::pass_further{},
+ ).handle_error_interruptible(
+ eagain_iertr::pass_further{},
crimson::ct_error::input_output_error::handle(
[FNAME, c, extent_size, is_level_tail, level] {
ERRORT("EIO -- extent_size={}, is_level_tail={}, level={}",
c.t, extent_size, is_level_tail, level);
ceph_abort("fatal error");
})
- ).safe_then([is_level_tail, level](auto extent) {
+ ).si_then([is_level_tail, level](auto extent) {
assert(extent->is_initial_pending());
auto mut = extent->get_mutable();
node_stage_t::bootstrap_extent(
return normalize(std::move(left_last_pos));
}
- eagain_future<NodeExtentMutable>
+ eagain_ifuture<NodeExtentMutable>
rebuild_extent(context_t c) override {
- return extent.rebuild(c).safe_then([this] (auto mut) {
+ return extent.rebuild(c).si_then([this] (auto mut) {
// addr may change
build_name();
return mut;
});
}
- eagain_future<> retire_extent(context_t c) override {
+ eagain_ifuture<> retire_extent(context_t c) override {
return extent.retire(c);
}
bool operator==(const Cursor& o) const { return (int)compare_to(o) == 0; }
bool operator!=(const Cursor& o) const { return (int)compare_to(o) != 0; }
- eagain_future<Cursor> get_next(Transaction& t) {
+ eagain_ifuture<Cursor> get_next(Transaction& t) {
assert(!is_end());
auto this_obj = *this;
return p_cursor->get_next(p_tree->get_context(t)
- ).safe_then([this_obj] (Ref<tree_cursor_t> next_cursor) {
+ ).si_then([this_obj] (Ref<tree_cursor_t> next_cursor) {
next_cursor->assert_next_to(
*this_obj.p_cursor, this_obj.p_tree->value_builder.get_header_magic());
auto ret = Cursor{this_obj.p_tree, next_cursor};
}
template <bool FORCE_MERGE = false>
- eagain_future<Cursor> erase(Transaction& t) {
+ eagain_ifuture<Cursor> erase(Transaction& t) {
assert(!is_end());
auto this_obj = *this;
return p_cursor->erase<FORCE_MERGE>(p_tree->get_context(t), true
- ).safe_then([this_obj, this] (Ref<tree_cursor_t> next_cursor) {
- assert(p_cursor->is_invalid());
+ ).si_then([this_obj, this] (Ref<tree_cursor_t> next_cursor) {
if (next_cursor) {
assert(!next_cursor->is_end());
return Cursor{p_tree, next_cursor};
* lookup
*/
- eagain_future<Cursor> begin(Transaction& t) {
- return get_root(t).safe_then([this, &t](auto root) {
+ eagain_ifuture<Cursor> begin(Transaction& t) {
+ return get_root(t).si_then([this, &t](auto root) {
return root->lookup_smallest(get_context(t));
- }).safe_then([this](auto cursor) {
+ }).si_then([this](auto cursor) {
return Cursor{this, cursor};
});
}
- eagain_future<Cursor> last(Transaction& t) {
- return get_root(t).safe_then([this, &t](auto root) {
+ eagain_ifuture<Cursor> last(Transaction& t) {
+ return get_root(t).si_then([this, &t](auto root) {
return root->lookup_largest(get_context(t));
- }).safe_then([this](auto cursor) {
+ }).si_then([this](auto cursor) {
return Cursor(this, cursor);
});
}
return Cursor::make_end(this);
}
- eagain_future<bool> contains(Transaction& t, const ghobject_t& obj) {
+ eagain_ifuture<bool> contains(Transaction& t, const ghobject_t& obj) {
return seastar::do_with(
full_key_t<KeyT::HOBJ>(obj),
- [this, &t](auto& key) -> eagain_future<bool> {
- return get_root(t).safe_then([this, &t, &key](auto root) {
+ [this, &t](auto& key) -> eagain_ifuture<bool> {
+ return get_root(t).si_then([this, &t, &key](auto root) {
// TODO: improve lower_bound()
return root->lower_bound(get_context(t), key);
- }).safe_then([](auto result) {
+ }).si_then([](auto result) {
return MatchKindBS::EQ == result.match();
});
}
);
}
- eagain_future<Cursor> find(Transaction& t, const ghobject_t& obj) {
+ eagain_ifuture<Cursor> find(Transaction& t, const ghobject_t& obj) {
return seastar::do_with(
full_key_t<KeyT::HOBJ>(obj),
- [this, &t](auto& key) -> eagain_future<Cursor> {
- return get_root(t).safe_then([this, &t, &key](auto root) {
+ [this, &t](auto& key) -> eagain_ifuture<Cursor> {
+ return get_root(t).si_then([this, &t, &key](auto root) {
// TODO: improve lower_bound()
return root->lower_bound(get_context(t), key);
- }).safe_then([this](auto result) {
+ }).si_then([this](auto result) {
if (result.match() == MatchKindBS::EQ) {
return Cursor(this, result.p_cursor);
} else {
);
}
- eagain_future<Cursor> lower_bound(Transaction& t, const ghobject_t& obj) {
+ eagain_ifuture<Cursor> lower_bound(Transaction& t, const ghobject_t& obj) {
return seastar::do_with(
full_key_t<KeyT::HOBJ>(obj),
- [this, &t](auto& key) -> eagain_future<Cursor> {
- return get_root(t).safe_then([this, &t, &key](auto root) {
+ [this, &t](auto& key) -> eagain_ifuture<Cursor> {
+ return get_root(t).si_then([this, &t, &key](auto root) {
return root->lower_bound(get_context(t), key);
- }).safe_then([this](auto result) {
+ }).si_then([this](auto result) {
return Cursor(this, result.p_cursor);
});
}
);
}
- eagain_future<Cursor> get_next(Transaction& t, Cursor& cursor) {
+ eagain_ifuture<Cursor> get_next(Transaction& t, Cursor& cursor) {
return cursor.get_next(t);
}
struct tree_value_config_t {
value_size_t payload_size = 256;
};
- using insert_ertr = eagain_ertr::extend<
+ using insert_iertr = eagain_iertr::extend<
crimson::ct_error::value_too_large>;
- insert_ertr::future<std::pair<Cursor, bool>>
+ insert_iertr::future<std::pair<Cursor, bool>>
insert(Transaction& t, const ghobject_t& obj, tree_value_config_t _vconf) {
LOG_PREFIX(OTree::insert);
if (_vconf.payload_size > value_builder.get_max_value_payload_size()) {
value_config_t vconf{value_builder.get_header_magic(), _vconf.payload_size};
return seastar::do_with(
full_key_t<KeyT::HOBJ>(obj),
- [this, &t, vconf](auto& key) -> eagain_future<std::pair<Cursor, bool>> {
+ [this, &t, vconf](auto& key) -> eagain_ifuture<std::pair<Cursor, bool>> {
ceph_assert(key.is_valid());
- return get_root(t).safe_then([this, &t, &key, vconf](auto root) {
+ return get_root(t).si_then([this, &t, &key, vconf](auto root) {
return root->insert(get_context(t), key, vconf, std::move(root));
- }).safe_then([this](auto ret) {
+ }).si_then([this](auto ret) {
auto& [cursor, success] = ret;
return std::make_pair(Cursor(this, cursor), success);
});
);
}
- eagain_future<std::size_t> erase(Transaction& t, const ghobject_t& obj) {
+ eagain_ifuture<std::size_t> erase(Transaction& t, const ghobject_t& obj) {
return seastar::do_with(
full_key_t<KeyT::HOBJ>(obj),
- [this, &t](auto& key) -> eagain_future<std::size_t> {
- return get_root(t).safe_then([this, &t, &key](auto root) {
+ [this, &t](auto& key) -> eagain_ifuture<std::size_t> {
+ return get_root(t).si_then([this, &t, &key](auto root) {
return root->erase(get_context(t), key, std::move(root));
});
}
);
}
- eagain_future<Cursor> erase(Transaction& t, Cursor& pos) {
+ eagain_ifuture<Cursor> erase(Transaction& t, Cursor& pos) {
return pos.erase(t);
}
- eagain_future<> erase(Transaction& t, Value& value) {
+ eagain_ifuture<> erase(Transaction& t, Value& value) {
assert(value.is_tracked());
auto ref_cursor = value.p_cursor;
return ref_cursor->erase(get_context(t), false
- ).safe_then([ref_cursor] (auto next_cursor) {
+ ).si_then([ref_cursor] (auto next_cursor) {
assert(ref_cursor->is_invalid());
assert(!next_cursor);
});
* stats
*/
- eagain_future<size_t> height(Transaction& t) {
- return get_root(t).safe_then([](auto root) {
+ eagain_ifuture<size_t> height(Transaction& t) {
+ return get_root(t).si_then([](auto root) {
return size_t(root->level() + 1);
});
}
- eagain_future<tree_stats_t> get_stats_slow(Transaction& t) {
- return get_root(t).safe_then([this, &t](auto root) {
+ eagain_ifuture<tree_stats_t> get_stats_slow(Transaction& t) {
+ return get_root(t).si_then([this, &t](auto root) {
unsigned height = root->level() + 1;
return root->get_tree_stats(get_context(t)
- ).safe_then([height](auto stats) {
+ ).si_then([height](auto stats) {
stats.height = height;
return seastar::make_ready_future<tree_stats_t>(stats);
});
return root_tracker->is_clean();
}
- eagain_future<> test_clone_from(
+ eagain_ifuture<> test_clone_from(
Transaction& t, Transaction& t_from, Btree& from) {
// Note: assume the tree to clone is tracked correctly in memory.
// In some unit tests, parts of the tree are stubbed out, so they
// should not be loaded from NodeExtentManager.
return from.get_root(t_from
- ).safe_then([this, &t](auto root_from) {
+ ).si_then([this, &t](auto root_from) {
return root_from->test_clone_root(get_context(t), *root_tracker);
});
}
return {*nm, value_builder, t};
}
- eagain_future<Ref<Node>> get_root(Transaction& t) {
+ eagain_ifuture<Ref<Node>> get_root(Transaction& t) {
auto root = root_tracker->get_root(t);
if (root) {
return seastar::make_ready_future<Ref<Node>>(root);
tree.emplace(std::move(nm));
}
- eagain_future<> bootstrap(Transaction& t) {
+ eagain_ifuture<> bootstrap(Transaction& t) {
std::ostringstream oss;
#ifndef NDEBUG
oss << "debug=on, ";
return tree->mkfs(t);
}
- eagain_future<BtreeCursor> insert_one(
+ eagain_ifuture<BtreeCursor> insert_one(
Transaction& t, const iterator_t& iter_rd) {
auto p_kv = *iter_rd;
logger().debug("[{}] insert {} -> {}",
p_kv->value);
return tree->insert(
t, p_kv->key, {p_kv->value.get_payload_size()}
- ).safe_then([&t, this, p_kv](auto ret) {
+ ).si_then([&t, this, p_kv](auto ret) {
auto success = ret.second;
auto cursor = std::move(ret.first);
initialize_cursor_from_item(t, p_kv->key, p_kv->value, cursor, success);
#ifndef NDEBUG
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
return tree->find(t, p_kv->key
- ).safe_then([this, cursor, p_kv](auto cursor_) mutable {
+ ).si_then([this, cursor, p_kv](auto cursor_) mutable {
assert(!cursor_.is_end());
ceph_assert(cursor_.get_ghobj() == p_kv->key);
ceph_assert(cursor_.value() == cursor.value());
return cursor;
});
#else
- return eagain_ertr::make_ready_future<BtreeCursor>(cursor);
+ return eagain_iertr::make_ready_future<BtreeCursor>(cursor);
#endif
- }).handle_error(
+ }).handle_error_interruptible(
[] (const crimson::ct_error::value_too_large& e) {
ceph_abort("impossible path");
},
);
}
- eagain_future<> insert(Transaction& t) {
+ eagain_ifuture<> insert(Transaction& t) {
auto ref_kv_iter = seastar::make_lw_shared<iterator_t>();
*ref_kv_iter = kvs.random_begin();
auto cursors = seastar::make_lw_shared<std::vector<BtreeCursor>>();
logger().warn("start inserting {} kvs ...", kvs.size());
auto start_time = mono_clock::now();
- return crimson::repeat([&t, this, cursors, ref_kv_iter,
+ return trans_intr::repeat([&t, this, cursors, ref_kv_iter,
start_time]()
- -> eagain_future<seastar::stop_iteration> {
+ -> eagain_ifuture<seastar::stop_iteration> {
if (*ref_kv_iter == kvs.random_end()) {
std::chrono::duration<double> duration = mono_clock::now() - start_time;
logger().warn("Insert done! {}s", duration.count());
seastar::stop_iteration::yes);
} else {
return insert_one(t, *ref_kv_iter
- ).safe_then([cursors, ref_kv_iter] (auto cursor) {
+ ).si_then([cursors, ref_kv_iter] (auto cursor) {
if constexpr (TRACK) {
cursors->emplace_back(cursor);
}
return seastar::stop_iteration::no;
});
}
- }).safe_then([&t, this, cursors, ref_kv_iter] {
+ }).si_then([&t, this, cursors, ref_kv_iter] {
if (!cursors->empty()) {
logger().info("Verifing tracked cursors ...");
*ref_kv_iter = kvs.random_begin();
return seastar::do_with(
cursors->begin(),
[&t, this, cursors, ref_kv_iter] (auto& c_iter) {
- return crimson::repeat(
+ return trans_intr::repeat(
[&t, this, &c_iter, cursors, ref_kv_iter] ()
- -> eagain_future<seastar::stop_iteration> {
+ -> eagain_ifuture<seastar::stop_iteration> {
if (*ref_kv_iter == kvs.random_end()) {
logger().info("Verify done!");
return seastar::make_ready_future<seastar::stop_iteration>(
assert(c_iter != cursors->end());
auto p_kv = **ref_kv_iter;
// validate values in tree keep intact
- return tree->find(t, p_kv->key).safe_then([this, &c_iter, ref_kv_iter](auto cursor) {
+ return tree->find(t, p_kv->key).si_then([this, &c_iter, ref_kv_iter](auto cursor) {
auto p_kv = **ref_kv_iter;
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
// validate values in cursors keep intact
});
});
} else {
- return eagain_ertr::now();
+ return eagain_iertr::now();
}
});
}
- eagain_future<> erase_one(
+ eagain_ifuture<> erase_one(
Transaction& t, const iterator_t& iter_rd) {
auto p_kv = *iter_rd;
logger().debug("[{}] erase {} -> {}",
key_hobj_t{p_kv->key},
p_kv->value);
return tree->erase(t, p_kv->key
- ).safe_then([&t, this, p_kv] (auto size) {
+ ).si_then([&t, this, p_kv] (auto size) {
ceph_assert(size == 1);
#ifndef NDEBUG
return tree->contains(t, p_kv->key
- ).safe_then([] (bool ret) {
+ ).si_then([] (bool ret) {
ceph_assert(ret == false);
});
#else
- return eagain_ertr::now();
+ return eagain_iertr::now();
#endif
});
}
- eagain_future<> erase(Transaction& t, std::size_t erase_size) {
+ eagain_ifuture<> erase(Transaction& t, std::size_t erase_size) {
assert(erase_size <= kvs.size());
kvs.shuffle();
auto erase_end = kvs.random_begin() + erase_size;
auto ref_kv_iter = seastar::make_lw_shared<iterator_t>();
auto cursors = seastar::make_lw_shared<std::map<ghobject_t, BtreeCursor>>();
- return seastar::now().then([&t, this, cursors, ref_kv_iter] {
+ return eagain_iertr::now().si_then([&t, this, cursors, ref_kv_iter] {
if constexpr (TRACK) {
logger().info("Tracking cursors before erase ...");
*ref_kv_iter = kvs.begin();
auto start_time = mono_clock::now();
- return crimson::repeat(
+ return trans_intr::repeat(
[&t, this, cursors, ref_kv_iter, start_time] ()
- -> eagain_future<seastar::stop_iteration> {
+ -> eagain_ifuture<seastar::stop_iteration> {
if (*ref_kv_iter == kvs.end()) {
std::chrono::duration<double> duration = mono_clock::now() - start_time;
logger().info("Track done! {}s", duration.count());
seastar::stop_iteration::yes);
}
auto p_kv = **ref_kv_iter;
- return tree->find(t, p_kv->key).safe_then([this, cursors, ref_kv_iter](auto cursor) {
+ return tree->find(t, p_kv->key).si_then([this, cursors, ref_kv_iter](auto cursor) {
auto p_kv = **ref_kv_iter;
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
cursors->emplace(p_kv->key, cursor);
});
});
} else {
- return eagain_ertr::now();
+ return eagain_iertr::now();
}
- }).safe_then([&t, this, ref_kv_iter, erase_end] {
+ }).si_then([&t, this, ref_kv_iter, erase_end] {
*ref_kv_iter = kvs.random_begin();
logger().warn("start erasing {}/{} kvs ...",
erase_end - kvs.random_begin(), kvs.size());
auto start_time = mono_clock::now();
- return crimson::repeat([&t, this, ref_kv_iter,
+ return trans_intr::repeat([&t, this, ref_kv_iter,
start_time, erase_end] ()
- -> eagain_future<seastar::stop_iteration> {
+ -> eagain_ifuture<seastar::stop_iteration> {
if (*ref_kv_iter == erase_end) {
std::chrono::duration<double> duration = mono_clock::now() - start_time;
logger().warn("Erase done! {}s", duration.count());
seastar::stop_iteration::yes);
} else {
return erase_one(t, *ref_kv_iter
- ).safe_then([ref_kv_iter] {
+ ).si_then([ref_kv_iter] {
++(*ref_kv_iter);
return seastar::stop_iteration::no;
});
}
});
- }).safe_then([this, cursors, ref_kv_iter, erase_end] {
+ }).si_then([this, cursors, ref_kv_iter, erase_end] {
if constexpr (TRACK) {
logger().info("Verifing tracked cursors ...");
*ref_kv_iter = kvs.random_begin();
});
}
- eagain_future<> get_stats(Transaction& t) {
+ eagain_ifuture<> get_stats(Transaction& t) {
return tree->get_stats_slow(t
- ).safe_then([this](auto stats) {
+ ).si_then([this](auto stats) {
logger().warn("{}", stats);
});
}
- eagain_future<std::size_t> height(Transaction& t) {
+ eagain_ifuture<std::size_t> height(Transaction& t) {
return tree->height(t);
}
tree.emplace(std::move(nm));
}
- eagain_future<> validate_one(
+ eagain_ifuture<> validate_one(
Transaction& t, const iterator_t& iter_seq) {
assert(iter_seq != kvs.end());
auto next_iter = iter_seq + 1;
auto p_kv = *iter_seq;
return tree->find(t, p_kv->key
- ).safe_then([p_kv, &t] (auto cursor) {
+ ).si_then([p_kv, &t] (auto cursor) {
validate_cursor_from_item(p_kv->key, p_kv->value, cursor);
return cursor.get_next(t);
- }).safe_then([next_iter, this] (auto cursor) {
+ }).si_then([next_iter, this] (auto cursor) {
if (next_iter == kvs.end()) {
ceph_assert(cursor.is_end());
} else {
});
}
- eagain_future<> validate(Transaction& t) {
- return seastar::async([this, &t] {
- logger().info("Verifing inserted ...");
- auto iter = kvs.begin();
- while (iter != kvs.end()) {
- validate_one(t, iter).unsafe_get0();
- ++iter;
- }
- logger().info("Verify done!");
+ eagain_ifuture<> validate(Transaction& t) {
+ logger().info("Verifing inserted ...");
+ return seastar::do_with(
+ kvs.begin(),
+ [this, &t] (auto &iter) {
+ return trans_intr::repeat(
+ [this, &t, &iter]() ->eagain_iertr::future<seastar::stop_iteration> {
+ if (iter == kvs.end()) {
+ return seastar::make_ready_future<seastar::stop_iteration>(
+ seastar::stop_iteration::yes);
+ }
+ return validate_one(t, iter).si_then([this, &iter] {
+ ++iter;
+ return seastar::make_ready_future<seastar::stop_iteration>(
+ seastar::stop_iteration::no);
+ });
+ });
});
}
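The rewritten validate() above also shows the loop conversion applied throughout this change: a blocking seastar::async walk with unsafe_get0() becomes a trans_intr::repeat whose body returns an interruptible future of seastar::stop_iteration. A stripped-down skeleton of that pattern, with a hypothetical container and visitor standing in for kvs and validate_one:

// Generic skeleton (illustrative only): Visitor is assumed to return an
// eagain_ifuture<>, and Container must outlive the returned future.
template <typename Container, typename Visitor>
eagain_ifuture<> visit_all(Container &container, Visitor visit) {
  return seastar::do_with(container.begin(),
    [&container, visit](auto &iter) {
    return trans_intr::repeat([&container, &iter, visit]()
        -> eagain_iertr::future<seastar::stop_iteration> {
      if (iter == container.end()) {
        return seastar::make_ready_future<seastar::stop_iteration>(
            seastar::stop_iteration::yes);
      }
      return visit(*iter).si_then([&iter] {
        ++iter;
        return seastar::make_ready_future<seastar::stop_iteration>(
            seastar::stop_iteration::no);
      });
    });
  });
}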
p_cursor.reset();
}
-eagain_future<> Value::extend(Transaction& t, value_size_t extend_size)
+eagain_ifuture<> Value::extend(Transaction& t, value_size_t extend_size)
{
assert(is_tracked());
[[maybe_unused]] auto target_size = get_payload_size() + extend_size;
return p_cursor->extend_value(get_context(t), extend_size)
#ifndef NDEBUG
- .safe_then([this, target_size] {
+ .si_then([this, target_size] {
assert(target_size == get_payload_size());
})
#endif
;
}
-eagain_future<> Value::trim(Transaction& t, value_size_t trim_size)
+eagain_ifuture<> Value::trim(Transaction& t, value_size_t trim_size)
{
assert(is_tracked());
assert(get_payload_size() > trim_size);
[[maybe_unused]] auto target_size = get_payload_size() - trim_size;
return p_cursor->trim_value(get_context(t), trim_size)
#ifndef NDEBUG
- .safe_then([this, target_size] {
+ .si_then([this, target_size] {
assert(target_size == get_payload_size());
})
#endif
Value(NodeExtentManager&, const ValueBuilder&, Ref<tree_cursor_t>&);
/// Extends the payload size.
- eagain_future<> extend(Transaction&, value_size_t extend_size);
+ eagain_ifuture<> extend(Transaction&, value_size_t extend_size);
/// Trim and shrink the payload.
- eagain_future<> trim(Transaction&, value_size_t trim_size);
+ eagain_ifuture<> trim(Transaction&, value_size_t trim_size);
/// Get the permission to mutate the payload with the optional value recorder.
template <typename PayloadT, typename ValueDeltaRecorderT>
[this, start, end, limit] (auto& ret) {
return repeat_eagain2([this, start, end, limit, &ret] {
return seastar::do_with(
- transaction_manager->create_transaction(
- Transaction::src_t::READ),
- [this, start, end, limit, &ret] (auto& t) {
- return onode_manager->list_onodes(*t, start, end, limit
- ).safe_then([&ret] (auto&& _ret) {
- ret = std::move(_ret);
+ transaction_manager->create_transaction(
+ Transaction::src_t::READ),
+ [this, start, end, limit, &ret] (auto& t) {
+ return with_trans_intr(
+ *t,
+ [this, start, end, limit, &ret](auto &t) {
+ return onode_manager->list_onodes(t, start, end, limit);
+ }).safe_then([&ret] (auto&& _ret) {
+ ret = std::move(_ret);
});
});
}).then([&ret] {
Transaction::src_t::MUTATE,
op_type_t::TRANSACTION,
[this](auto &ctx) {
- return onode_manager->get_or_create_onodes(
- *ctx.transaction, ctx.iter.get_objects()
- ).safe_then([this, &ctx](auto &&read_onodes) {
+ return with_trans_intr(
+ *ctx.transaction,
+ [&](auto &t) {
+ return onode_manager->get_or_create_onodes(
+ *ctx.transaction, ctx.iter.get_objects());
+ }
+ ).safe_then([this, &ctx](auto &&read_onodes) {
ctx.onodes = std::move(read_onodes);
return crimson::repeat(
[this, &ctx]() -> tm_ertr::future<seastar::stop_iteration> {
};
});
}).safe_then([this, &ctx] {
- return onode_manager->write_dirty(*ctx.transaction, ctx.onodes);
+ return with_trans_intr(
+ *ctx.transaction,
+ [&](auto &t) {
+ return onode_manager->write_dirty(*ctx.transaction, ctx.onodes);
+ }
+ );
}).safe_then([this, &ctx] {
// There are some validations in onode tree during onode value
// destruction in debug mode, which need to be done before calling
{
LOG_PREFIX(SeaStore::_remove);
DEBUGT("onode={}", *ctx.transaction, *onode);
- return onode_manager->erase_onode(*ctx.transaction, onode);
+ return with_trans_intr(
+ *ctx.transaction,
+ [&](auto &t) {
+ return onode_manager->erase_onode(*ctx.transaction, onode);
+ }
+ );
}
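Each of the SeaStore call sites in this area follows the same shape: the OnodeManager now returns interruptible futures, so the call is wrapped in with_trans_intr, which binds the interrupt condition to the transaction and hands back an ordinary errorated future that safe_then and repeat_eagain keep consuming unchanged. A condensed, illustrative sketch of that boundary, reusing names from the surrounding hunks:

// Sketch of the interruptible/errorated boundary at a SeaStore call site.
return with_trans_intr(
  *ctx.transaction,
  [&](auto &t) {
    // Interruptible side: iertr futures, chained with si_then.
    return onode_manager->erase_onode(t, onode);
  }
).safe_then([] {
  // Errorated side again: an interruption is reported as eagain and is
  // retried by an enclosing repeat_eagain loop.
});

In the test hunks at the bottom, the same wrapper is driven synchronously with unsafe_get().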
SeaStore::tm_ret SeaStore::_touch(
[=](auto &oid, auto &ret, auto &t, auto &onode, auto &f) {
return repeat_eagain([&, this, src] {
t = transaction_manager->create_transaction(src);
- return onode_manager->get_onode(
- *t, oid
+ return with_trans_intr(
+ *t,
+ [&](auto &t) {
+ return onode_manager->get_onode(t, oid);
+ }
).safe_then([&](auto onode_ret) {
onode = std::move(onode_ret);
return f(*t, *onode);
: NodeExtentManager::create_seastore(*tm)));
{
auto t = create_mutate_transaction();
- tree->bootstrap(*t).unsafe_get();
+ with_trans_intr(*t, [&](auto &tr){
+ return tree->bootstrap(tr);
+ }).unsafe_get();
submit_transaction(std::move(t));
}
{
auto t = create_mutate_transaction();
- tree->insert(*t).unsafe_get();
+ with_trans_intr(*t, [&](auto &tr){
+ return tree->insert(tr);
+ }).unsafe_get();
auto start_time = mono_clock::now();
submit_transaction(std::move(t));
std::chrono::duration<double> duration = mono_clock::now() - start_time;
{
// Note: create_weak_transaction() can also work, but too slow.
auto t = create_read_transaction();
- tree->get_stats(*t).unsafe_get();
- tree->validate(*t).unsafe_get();
+ with_trans_intr(*t, [&](auto &tr){
+ return tree->get_stats(tr);
+ }).unsafe_get();
+
+ with_trans_intr(*t, [&](auto &tr){
+ return tree->validate(tr);
+ }).unsafe_get();
}
{
auto t = create_mutate_transaction();
- tree->erase(*t, kvs.size() * erase_ratio).unsafe_get();
+ with_trans_intr(*t, [&](auto &tr){
+ return tree->erase(tr, kvs.size() * erase_ratio);
+ }).unsafe_get();
submit_transaction(std::move(t));
}
{
auto t = create_read_transaction();
- tree->get_stats(*t).unsafe_get();
- tree->validate(*t).unsafe_get();
+ with_trans_intr(*t, [&](auto &tr){
+ return tree->get_stats(tr);
+ }).unsafe_get();
+
+ with_trans_intr(*t, [&](auto &tr){
+ return tree->validate(tr);
+ }).unsafe_get();
}
tree.reset();
});