namespace crimson::os::seastore::backref {
-static depth_t get_depth(const CachedExtent &e)
-{
- assert(is_backref_node(e.get_type()));
- return e.cast<BackrefNode>()->get_node_meta().depth;
-}
-
BtreeBackrefManager::get_mapping_ret
BtreeBackrefManager::get_mapping(
  Transaction &t,
  paddr_t offset)
{
LOG_PREFIX(BtreeBackrefManager::get_mapping);
TRACET("{}", t, offset);
auto c = get_context(t);
- return with_btree_ret<BackrefBtree, BackrefPinRef>(
+ return with_btree_ret<BackrefBtree, BackrefMappingRef>(
cache,
c,
[c, offset](auto &btree) {
});
}
-void BtreeBackrefManager::complete_transaction(
- Transaction &t,
- std::vector<CachedExtentRef> &to_clear,
- std::vector<CachedExtentRef> &to_link)
-{
- LOG_PREFIX(BtreeBackrefManager::complete_transaction);
- DEBUGT("start", t);
- // need to call check_parent from leaf->parent
- std::sort(
- to_clear.begin(), to_clear.end(),
- [](auto &l, auto &r) { return get_depth(*l) < get_depth(*r); });
-
- for (auto &e: to_clear) {
- auto &pin = e->cast<BackrefNode>()->pin;
- DEBUGT("retiring extent {} -- {}", t, pin, *e);
- pin_set.retire(pin);
- }
-
- std::sort(
- to_link.begin(), to_link.end(),
- [](auto &l, auto &r) -> bool { return get_depth(*l) > get_depth(*r); });
-
- for (auto &e : to_link) {
- DEBUGT("linking extent -- {}", t, *e);
- pin_set.add_pin(e->cast<BackrefNode>()->pin);
- }
-
- for (auto &e: to_clear) {
- auto &pin = e->cast<BackrefNode>()->pin;
- TRACET("checking extent {} -- {}", t, pin, *e);
- pin_set.check_parent(pin);
- }
-}
-
Cache::backref_entry_query_mset_t
BtreeBackrefManager::get_cached_backref_entries_in_range(
paddr_t start,
constexpr size_t BACKREF_BLOCK_SIZE = 4096;
-class BtreeBackrefPin : public BtreeNodePin<paddr_t, laddr_t> {
+class BtreeBackrefMapping : public BtreeNodeMapping<paddr_t, laddr_t> {
extent_types_t type;
public:
- BtreeBackrefPin(op_context_t<paddr_t> ctx)
- : BtreeNodePin(ctx) {}
- BtreeBackrefPin(
+ BtreeBackrefMapping(op_context_t<paddr_t> ctx)
+ : BtreeNodeMapping(ctx) {}
+ BtreeBackrefMapping(
op_context_t<paddr_t> ctx,
CachedExtentRef parent,
uint16_t pos,
backref_map_val_t &val,
backref_node_meta_t &&meta)
- : BtreeNodePin(
+ : BtreeNodeMapping(
ctx,
parent,
pos,
using BackrefBtree = FixedKVBtree<
paddr_t, backref_map_val_t, BackrefInternalNode,
- BackrefLeafNode, BtreeBackrefPin, BACKREF_BLOCK_SIZE, false>;
+ BackrefLeafNode, BtreeBackrefMapping, BACKREF_BLOCK_SIZE, false>;
class BtreeBackrefManager : public BackrefManager {
public:
Transaction &t,
CachedExtentRef e) final;
- void complete_transaction(
- Transaction &t,
- std::vector<CachedExtentRef> &,
- std::vector<CachedExtentRef> &) final;
-
rewrite_extent_ret rewrite_extent(
Transaction &t,
CachedExtentRef extent) final;
- void add_pin(BackrefPin &pin) final {
- auto *bpin = reinterpret_cast<BtreeBackrefPin*>(&pin);
- pin_set.add_pin(bpin->get_range_pin());
- bpin->set_parent(nullptr);
- }
- void remove_pin(BackrefPin &pin) final {
- auto *bpin = reinterpret_cast<BtreeBackrefPin*>(&pin);
- pin_set.retire(bpin->get_range_pin());
- }
-
Cache::backref_entry_query_mset_t
get_cached_backref_entries_in_range(
paddr_t start,
private:
Cache &cache;
- btree_pin_set_t<paddr_t> pin_set;
-
op_context_t<paddr_t> get_context(Transaction &t) {
- return op_context_t<paddr_t>{cache, t, &pin_set};
+ return op_context_t<paddr_t>{cache, t};
}
};
*/
using get_mapping_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
- using get_mapping_ret = get_mapping_iertr::future<BackrefPinRef>;
+ using get_mapping_ret = get_mapping_iertr::future<BackrefMappingRef>;
virtual get_mapping_ret get_mapping(
Transaction &t,
paddr_t offset) = 0;
* Insert new paddr_t -> laddr_t mapping
*/
using new_mapping_iertr = base_iertr;
- using new_mapping_ret = new_mapping_iertr::future<BackrefPinRef>;
+ using new_mapping_ret = new_mapping_iertr::future<BackrefMappingRef>;
virtual new_mapping_ret new_mapping(
Transaction &t,
paddr_t key,
Transaction &t,
scan_mapped_space_func_t &&f) = 0;
- virtual void complete_transaction(
- Transaction &t,
- std::vector<CachedExtentRef> &to_clear, ///< extents whose pins are to be cleared,
- // as the results of their retirements
- std::vector<CachedExtentRef> &to_link ///< fresh extents whose pins are to be inserted
- // into backref manager's pin set
- ) = 0;
-
- virtual void add_pin(BackrefPin &pin) = 0;
- virtual void remove_pin(BackrefPin &pin) = 0;
-
virtual ~BackrefManager() {}
};
template <typename key_t, typename val_t>
get_child_ret_t<LogicalCachedExtent>
-BtreeNodePin<key_t, val_t>::get_logical_extent(
+BtreeNodeMapping<key_t, val_t>::get_logical_extent(
Transaction &t)
{
assert(parent);
return v;
}
-template class BtreeNodePin<laddr_t, paddr_t>;
-template class BtreeNodePin<paddr_t, laddr_t>;
-
+template class BtreeNodeMapping<laddr_t, paddr_t>;
+template class BtreeNodeMapping<paddr_t, laddr_t>;
} // namespace crimson::os::seastore
struct op_context_t {
Cache &cache;
Transaction &trans;
- btree_pin_set_t<node_key_t> *pins = nullptr;
};
constexpr uint16_t MAX_FIXEDKVBTREE_DEPTH = 8;
}
};
-
-/**
- * btree_range_pin_t
- *
- * Element tracked by btree_pin_set_t below. Encapsulates the intrusive_set
- * hook, the fixed_kv_node_meta_t representing the key range covered by a node,
- * and extent and ref members intended to hold a reference when the extent
- * should be pinned.
- */
-template <typename T>
-class btree_pin_set_t;
-
-template <typename node_key_t>
-class FixedKVNode;
-
-template <typename node_bound_t>
-class btree_range_pin_t : public boost::intrusive::set_base_hook<> {
- friend class btree_pin_set_t<node_bound_t>;
- friend class FixedKVNode<node_bound_t>;
- fixed_kv_node_meta_t<node_bound_t> range;
-
- btree_pin_set_t<node_bound_t> *pins = nullptr;
-
- // We need to be able to remember extent without holding a reference,
- // but we can do it more compactly -- TODO
- CachedExtent *extent = nullptr;
- CachedExtentRef ref;
-
- using index_t = boost::intrusive::set<btree_range_pin_t>;
-
- void acquire_ref() {
- ref = CachedExtentRef(extent);
- }
-
- void drop_ref() {
- ref.reset();
- }
-
-public:
- btree_range_pin_t() = default;
- btree_range_pin_t(CachedExtent *extent)
- : extent(extent) {}
- btree_range_pin_t(const btree_range_pin_t &rhs, CachedExtent *extent)
- : range(rhs.range), extent(extent) {}
-
- bool has_ref() const {
- return !!ref;
- }
-
- bool is_root() const {
- return range.is_root();
- }
-
- void set_range(const fixed_kv_node_meta_t<node_bound_t> &nrange) {
- range = nrange;
- }
- void set_extent(CachedExtent *nextent) {
- ceph_assert(!extent);
- extent = nextent;
- }
-
- CachedExtent &get_extent() {
- assert(extent);
- return *extent;
- }
-
- bool has_ref() {
- return !!ref;
- }
-
- void take_pin(btree_range_pin_t &other)
- {
- ceph_assert(other.extent);
- if (other.pins) {
- other.pins->replace_pin(*this, other);
- pins = other.pins;
- other.pins = nullptr;
-
- if (other.has_ref()) {
- other.drop_ref();
- acquire_ref();
- }
- }
- }
-
- friend bool operator<(
- const btree_range_pin_t &lhs, const btree_range_pin_t &rhs) {
- assert(lhs.range.depth == rhs.range.depth);
- return lhs.range.begin < rhs.range.begin;
- }
- friend bool operator>(
- const btree_range_pin_t &lhs, const btree_range_pin_t &rhs) {
- assert(lhs.range.depth == rhs.range.depth);
- return lhs.range.begin > rhs.range.begin;
- }
- friend bool operator==(
- const btree_range_pin_t &lhs, const btree_range_pin_t &rhs) {
- assert(lhs.range.depth == rhs.range.depth);
- return lhs.range.begin == rhs.range.begin;
- }
-
- struct meta_cmp_t {
- bool operator()(
- const btree_range_pin_t &lhs, const fixed_kv_node_meta_t<node_bound_t> &rhs) const {
- assert(lhs.range.depth == rhs.depth);
- return lhs.range.begin < rhs.begin;
- }
- bool operator()(
- const fixed_kv_node_meta_t<node_bound_t> &lhs, const btree_range_pin_t &rhs) const {
- assert(lhs.depth == rhs.range.depth);
- return lhs.begin < rhs.range.begin;
- }
- };
-
- friend std::ostream &operator<<(
- std::ostream &lhs,
- const btree_range_pin_t<node_bound_t> &rhs) {
- return lhs << "btree_range_pin_t("
- << "begin=" << rhs.range.begin
- << ", end=" << rhs.range.end
- << ", depth=" << rhs.range.depth
- << ", extent=" << rhs.extent
- << ")";
- }
-
- template <typename, typename>
- friend class BtreeNodePin;
- ~btree_range_pin_t()
- {
- ceph_assert(!pins == !is_linked());
- ceph_assert(!ref);
- if (pins) {
- crimson::get_logger(ceph_subsys_seastore_lba
- ).debug("{}: removing {}", __func__, *this);
- pins->remove_pin(*this, true);
- }
- extent = nullptr;
- }
-
-};
-
-/**
- * btree_pin_set_t
- *
- * Ensures that for every cached node, all parent btree nodes required
- * to map it are present in cache. Relocating these nodes can
- * therefore be done without further reads or cache space.
- *
- * Contains a btree_range_pin_t for every clean or dirty btree node
- * or LogicalCachedExtent instance in cache at any point in time.
- * For any btree node, the contained btree_range_pin_t will hold
- * a reference to that node pinning it in cache as long as that
- * node has children in the set. This invariant can be violated
- * only by calling retire_extent and is repaired by calling
- * check_parent synchronously after adding any new extents.
- */
-template <typename node_bound_t>
-class btree_pin_set_t {
- friend class btree_range_pin_t<node_bound_t>;
- using pins_by_depth_t = std::array<
- typename btree_range_pin_t<node_bound_t>::index_t,
- MAX_FIXEDKVBTREE_DEPTH>;
- pins_by_depth_t pins_by_depth;
-
- /// Removes pin from set optionally checking whether parent has other children
- void remove_pin(btree_range_pin_t<node_bound_t> &pin, bool do_check_parent)
- {
- crimson::get_logger(ceph_subsys_seastore_lba).debug("{}: {}", __func__, pin);
- ceph_assert(pin.is_linked());
- ceph_assert(pin.pins);
- ceph_assert(!pin.ref);
-
- auto &layer = pins_by_depth[pin.range.depth];
- layer.erase(layer.s_iterator_to(pin));
- pin.pins = nullptr;
-
- if (do_check_parent) {
- check_parent(pin);
- }
- }
-
- void replace_pin(
- btree_range_pin_t<node_bound_t> &to,
- btree_range_pin_t<node_bound_t> &from)
- {
- assert(to.range.depth == from.range.depth);
- pins_by_depth[from.range.depth].replace_node(
- btree_range_pin_t<node_bound_t>::index_t::s_iterator_to(from), to);
- }
-
- /// Returns parent pin if exists
- btree_range_pin_t<node_bound_t> *maybe_get_parent(
- const fixed_kv_node_meta_t<node_bound_t> &meta)
- {
- auto cmeta = meta;
- cmeta.depth++;
- auto &layer = pins_by_depth[cmeta.depth];
- auto iter = layer.upper_bound(
- cmeta,
- typename btree_range_pin_t<node_bound_t>::meta_cmp_t());
- if (iter == layer.begin()) {
- return nullptr;
- } else {
- --iter;
- if (iter->range.is_parent_of(meta)) {
- return &*iter;
- } else {
- return nullptr;
- }
- }
- }
-
- /// Returns earliest child pin if exist
- const btree_range_pin_t<node_bound_t>
- *maybe_get_first_child(const fixed_kv_node_meta_t<node_bound_t> &meta) const
- {
- if (meta.depth == 0) {
- return nullptr;
- }
-
- auto cmeta = meta;
- cmeta.depth--;
-
- auto &layer = pins_by_depth[cmeta.depth];
- auto iter = layer.lower_bound(
- cmeta,
- typename btree_range_pin_t<node_bound_t>::meta_cmp_t());
- if (iter == layer.end()) {
- return nullptr;
- } else if (meta.is_parent_of(iter->range)) {
- return &*iter;
- } else {
- return nullptr;
- }
- }
-
- /// Releases pin if it has no children
- void release_if_no_children(btree_range_pin_t<node_bound_t> &pin)
- {
- ceph_assert(pin.is_linked());
- if (maybe_get_first_child(pin.range) == nullptr) {
- pin.drop_ref();
- }
- }
-
-public:
- btree_pin_set_t() {}
- /// Adds pin to set, assumes set is consistent
- void add_pin(btree_range_pin_t<node_bound_t> &pin)
- {
- ceph_assert(!pin.is_linked());
- ceph_assert(!pin.pins);
- ceph_assert(!pin.ref);
-
- auto &layer = pins_by_depth[pin.range.depth];
- auto [prev, inserted] = layer.insert(pin);
- if (!inserted) {
- crimson::get_logger(ceph_subsys_seastore_lba).error(
- "{}: unable to add {} ({}), found {} ({})",
- __func__,
- pin,
- *(pin.extent),
- *prev,
- *(prev->extent));
- ceph_assert(0 == "impossible");
- return;
- }
- pin.pins = this;
- if (!pin.is_root()) {
- auto *parent = maybe_get_parent(pin.range);
- ceph_assert(parent);
- if (!parent->has_ref()) {
- crimson::get_logger(ceph_subsys_seastore_lba
- ).debug("{}: acquiring parent {}", __func__,
- static_cast<void*>(parent));
- parent->acquire_ref();
- } else {
- crimson::get_logger(ceph_subsys_seastore_lba).debug(
- "{}: parent has ref {}", __func__,
- static_cast<void*>(parent));
- }
- }
- if (maybe_get_first_child(pin.range) != nullptr) {
- crimson::get_logger(ceph_subsys_seastore_lba).debug(
- "{}: acquiring self {}", __func__, pin);
- pin.acquire_ref();
- }
- }
-
-
- /**
- * retire/check_parent
- *
- * See BtreeLBAManager::complete_transaction.
- * retire removes the specified pin from the set, but does not
- * check parents. After any new extents are added to the set,
- * the caller is required to call check_parent to restore the
- * invariant.
- */
- void retire(btree_range_pin_t<node_bound_t> &pin)
- {
- pin.drop_ref();
- remove_pin(pin, false);
- }
-
- void check_parent(btree_range_pin_t<node_bound_t> &pin)
- {
- auto parent = maybe_get_parent(pin.range);
- if (parent) {
- crimson::get_logger(ceph_subsys_seastore_lba
- ).debug("{}: releasing parent {}", __func__, *parent);
- release_if_no_children(*parent);
- }
- }
-
- template <typename F>
- void scan(F &&f) {
- for (auto &layer : pins_by_depth) {
- for (auto &i : layer) {
- std::invoke(f, i);
- }
- }
- }
-
- ~btree_pin_set_t() {
- for (auto &layer : pins_by_depth) {
- ceph_assert(layer.empty());
- }
- }
-};
-
template <typename key_t, typename val_t>
-class BtreeNodePin : public PhysicalNodePin<key_t, val_t> {
+class BtreeNodeMapping : public PhysicalNodeMapping<key_t, val_t> {
op_context_t<key_t> ctx;
/**
val_t value;
extent_len_t len;
- btree_range_pin_t<key_t> pin;
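+  // key range (begin, end) and depth covered by this mapping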
+ fixed_kv_node_meta_t<key_t> range;
uint16_t pos = std::numeric_limits<uint16_t>::max();
public:
using val_type = val_t;
- BtreeNodePin(op_context_t<key_t> ctx) : ctx(ctx) {}
+ BtreeNodeMapping(op_context_t<key_t> ctx) : ctx(ctx) {}
- BtreeNodePin(
+ BtreeNodeMapping(
op_context_t<key_t> ctx,
CachedExtentRef parent,
uint16_t pos,
parent(parent),
value(value),
len(len),
+ range(std::move(meta)),
pos(pos)
{
- pin.set_range(std::move(meta));
if (!parent->is_pending()) {
this->child_pos = {parent, pos};
}
return parent;
}
- btree_range_pin_t<key_t>& get_range_pin() {
- return pin;
- }
-
CachedExtentRef get_parent() {
return parent;
}
- void set_parent(CachedExtentRef pin) {
- parent = pin;
- }
-
- void link_extent(LogicalCachedExtent *ref) final {
- pin.set_extent(ref);
- pos = std::numeric_limits<uint16_t>::max();
+ void set_parent(CachedExtentRef ext) {
+ parent = ext;
}
uint16_t get_pos() const final {
}
extent_len_t get_length() const final {
- ceph_assert(pin.range.end > pin.range.begin);
+ ceph_assert(range.end > range.begin);
return len;
}
}
key_t get_key() const final {
- return pin.range.begin;
+ return range.begin;
}
- PhysicalNodePinRef<key_t, val_t> duplicate() const final {
- auto ret = std::unique_ptr<BtreeNodePin<key_t, val_t>>(
- new BtreeNodePin<key_t, val_t>(ctx));
- ret->pin.set_range(pin.range);
+ PhysicalNodeMappingRef<key_t, val_t> duplicate() const final {
+ auto ret = std::unique_ptr<BtreeNodeMapping<key_t, val_t>>(
+ new BtreeNodeMapping<key_t, val_t>(ctx));
+ ret->range = range;
ret->value = value;
ret->parent = parent;
ret->len = len;
return ret;
}
- void take_pin(PhysicalNodePin<key_t, val_t> &opin) final {
- pin.take_pin(static_cast<BtreeNodePin<key_t, val_t>&>(opin).pin);
- }
-
bool has_been_invalidated() const final {
return parent->has_been_invalidated();
}
};
}
-
-#if FMT_VERSION >= 90000
-template <typename node_bound_t> struct fmt::formatter<crimson::os::seastore::btree_range_pin_t<node_bound_t>> : fmt::ostream_formatter {};
-#endif
return leaf.pos == 0;
}
- PhysicalNodePinRef<node_key_t, typename pin_t::val_type>
+ PhysicalNodeMappingRef<node_key_t, typename pin_t::val_type>
get_pin(op_context_t<node_key_t> ctx) const {
assert(!is_end());
auto val = get_val();
root_leaf->set_size(0);
fixed_kv_node_meta_t<node_key_t> meta{min_max_t<node_key_t>::min, min_max_t<node_key_t>::max, 1};
root_leaf->set_meta(meta);
- root_leaf->pin.set_range(meta);
+ root_leaf->range = meta;
get_tree_stats<self_type>(c.trans).depth = 1u;
get_tree_stats<self_type>(c.trans).extents_num_delta++;
link_phy_tree_root_node(root_block, root_leaf.get());
return upper_bound(c, min_max_t<node_key_t>::max);
}
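+  // Debug helper: verify that each entry of `node` has a consistent
+  // in-memory child pointer, i.e. the child occupies the slot matching
+  // its key in the stable parent's children[] and its parent tracker
+  // points back to that parent.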
+ template <typename child_node_t, typename node_t>
+ void check_node(
+ op_context_t<node_key_t> c,
+ TCachedExtentRef<node_t> node)
+ {
+ for (auto i : *node) {
+ CachedExtentRef child_node;
+ Transaction::get_extent_ret ret;
+
+ if constexpr (std::is_base_of_v<typename internal_node_t::base_t, child_node_t>) {
+ ret = c.trans.get_extent(
+ i->get_val().maybe_relative_to(node->get_paddr()),
+ &child_node);
+ } else {
+ if constexpr (leaf_has_children) {
+ ret = c.trans.get_extent(
+ i->get_val().paddr.maybe_relative_to(node->get_paddr()),
+ &child_node);
+ }
+ }
+ if (ret == Transaction::get_extent_ret::PRESENT) {
+ if (child_node->is_mutation_pending()) {
+ auto &prior = (child_node_t &)*child_node->prior_instance;
+ assert(prior.is_valid());
+ assert(prior.is_parent_valid());
+ if (node->is_mutation_pending()) {
+ auto &n = node->get_stable_for_key(i->get_key());
+ assert(prior.get_parent_node().get() == &n);
+ auto pos = n.lower_bound_offset(i->get_key());
+ assert(pos < n.get_node_size());
+ assert(n.children[pos] == &prior);
+ } else {
+ assert(prior.get_parent_node().get() == node.get());
+ assert(node->children[i->get_offset()] == &prior);
+ }
+ } else if (child_node->is_initial_pending()) {
+ auto cnode = child_node->template cast<child_node_t>();
+ auto pos = node->find(i->get_key()).get_offset();
+ auto child = node->children[pos];
+ assert(child);
+ assert(child == cnode.get());
+ assert(cnode->is_parent_valid());
+ } else {
+ assert(child_node->is_valid());
+ auto cnode = child_node->template cast<child_node_t>();
+ assert(cnode->has_parent_tracker());
+ if (node->is_pending()) {
+ auto &n = node->get_stable_for_key(i->get_key());
+ assert(cnode->get_parent_node().get() == &n);
+ auto pos = n.lower_bound_offset(i->get_key());
+ assert(pos < n.get_node_size());
+ assert(n.children[pos] == cnode.get());
+ } else {
+ assert(cnode->get_parent_node().get() == node.get());
+ assert(node->children[i->get_offset()] == cnode.get());
+ }
+ }
+ } else if (ret == Transaction::get_extent_ret::ABSENT) {
+ ChildableCachedExtent* child = nullptr;
+ if (node->is_pending()) {
+ auto &n = node->get_stable_for_key(i->get_key());
+ auto pos = n.lower_bound_offset(i->get_key());
+ assert(pos < n.get_node_size());
+ child = n.children[pos];
+ if (is_valid_child_ptr(child)) {
+ auto c = (child_node_t*)child;
+ assert(c->has_parent_tracker());
+ assert(c->get_parent_node().get() == &n);
+ }
+ } else {
+ child = node->children[i->get_offset()];
+ if (is_valid_child_ptr(child)) {
+ auto c = (child_node_t*)child;
+ assert(c->has_parent_tracker());
+ assert(c->get_parent_node().get() == node.get());
+ }
+ }
+
+ if (!is_valid_child_ptr(child)) {
+ if constexpr (
+ std::is_base_of_v<typename internal_node_t::base_t, child_node_t>)
+ {
+ assert(!c.cache.query_cache(i->get_val(), nullptr));
+ } else {
+ if constexpr (leaf_has_children) {
+ assert(!c.cache.query_cache(i->get_val().paddr, nullptr));
+ }
+ }
+ }
+ } else {
+ ceph_abort("impossible");
+ }
+ }
+ }
+
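+  // Walk the whole tree and run check_node() on every internal node, and
+  // on every leaf node when the leaf layer tracks children.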
+ using check_child_trackers_ret = base_iertr::future<>;
+ check_child_trackers_ret check_child_trackers(
+ op_context_t<node_key_t> c) {
+ mapped_space_visitor_t checker = [c, this](
+ paddr_t,
+ node_key_t,
+ extent_len_t,
+ depth_t depth,
+ extent_types_t,
+ iterator& iter) {
+ if constexpr (!leaf_has_children) {
+ if (depth == 1) {
+ return seastar::now();
+ }
+ }
+ if (depth > 1) {
+ auto &node = iter.get_internal(depth).node;
+ assert(node->is_valid());
+ check_node<typename internal_node_t::base_t>(c, node);
+ } else {
+ assert(depth == 1);
+ auto &node = iter.leaf.node;
+ assert(node->is_valid());
+ check_node<LogicalCachedExtent>(c, node);
+ }
+ return seastar::now();
+ };
+
+ return seastar::do_with(
+ std::move(checker),
+ [this, c](auto &checker) {
+ return iterate_repeat(
+ c,
+ lower_bound(
+ c,
+ min_max_t<node_key_t>::min,
+ &checker),
+ [](auto &pos) {
+ if (pos.is_end()) {
+ return base_iertr::make_ready_future<
+ seastar::stop_iteration>(
+ seastar::stop_iteration::yes);
+ }
+ return base_iertr::make_ready_future<
+ seastar::stop_iteration>(
+ seastar::stop_iteration::no);
+ },
+ &checker);
+ });
+ }
+
using iterate_repeat_ret_inner = base_iertr::future<
seastar::stop_iteration>;
template <typename F>
fixed_kv_extent.get_length(),
n_fixed_kv_extent->get_bptr().c_str());
n_fixed_kv_extent->set_modify_time(fixed_kv_extent.get_modify_time());
- n_fixed_kv_extent->pin.set_range(n_fixed_kv_extent->get_node_meta());
+ n_fixed_kv_extent->range = n_fixed_kv_extent->get_node_meta();
if (fixed_kv_extent.get_type() == internal_node_t::TYPE ||
leaf_node_t::do_has_children) {
parent_pos=std::move(parent_pos)]
(internal_node_t &node) {
assert(!node.is_pending());
- assert(!node.pin.is_linked());
- node.pin.set_range(fixed_kv_node_meta_t<node_key_t>{begin, end, depth});
+ assert(!node.is_linked());
+ node.range = fixed_kv_node_meta_t<node_key_t>{begin, end, depth};
if (parent_pos) {
auto &parent = parent_pos->node;
parent->link_child(&node, parent_pos->pos);
link_phy_tree_root_node(root_block, &node);
}
}
- if (c.pins) {
- c.pins->add_pin(node.pin);
- }
};
return c.cache.template get_absent_extent<internal_node_t>(
c.trans,
*ret);
// This can only happen during init_cached_extent
  // or when a backref extent is being rewritten by gc space reclaiming
- if (c.pins && !ret->is_pending() && !ret->pin.is_linked()) {
+ if (!ret->is_pending() && !ret->is_linked()) {
assert(ret->is_dirty()
|| (is_backref_node(ret->get_type())
&& ret->is_clean()));
parent_pos=std::move(parent_pos)]
(leaf_node_t &node) {
assert(!node.is_pending());
- assert(!node.pin.is_linked());
- node.pin.set_range(fixed_kv_node_meta_t<node_key_t>{begin, end, 1});
+ assert(!node.is_linked());
+ node.range = fixed_kv_node_meta_t<node_key_t>{begin, end, 1};
if (parent_pos) {
auto &parent = parent_pos->node;
parent->link_child(&node, parent_pos->pos);
link_phy_tree_root_node(root_block, &node);
}
}
- if (c.pins) {
- c.pins->add_pin(node.pin);
- }
};
return c.cache.template get_absent_extent<leaf_node_t>(
c.trans,
*ret);
// This can only happen during init_cached_extent
  // or when a backref extent is being rewritten by gc space reclaiming
- if (c.pins && !ret->is_pending() && !ret->pin.is_linked()) {
+ if (!ret->is_pending() && !ret->is_linked()) {
assert(ret->is_dirty()
|| (is_backref_node(ret->get_type())
&& ret->is_clean()));
fixed_kv_node_meta_t<node_key_t> meta{
min_max_t<node_key_t>::min, min_max_t<node_key_t>::max, iter.get_depth() + 1};
nroot->set_meta(meta);
- nroot->pin.set_range(meta);
+ nroot->range = meta;
nroot->journal_insert(
nroot->begin(),
min_max_t<node_key_t>::min,
template <typename node_key_t>
struct FixedKVNode : ChildableCachedExtent {
using FixedKVNodeRef = TCachedExtentRef<FixedKVNode>;
- btree_range_pin_t<node_key_t> pin;
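+  // key range (begin, end) and depth covered by this node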
+ fixed_kv_node_meta_t<node_key_t> range;
struct copy_source_cmp_t {
using is_transparent = node_key_t;
bool operator()(const FixedKVNodeRef &l, const FixedKVNodeRef &r) const {
- assert(l->pin.range.end <= r->pin.range.begin
- || r->pin.range.end <= l->pin.range.begin
- || (l->pin.range.begin == r->pin.range.begin
- && l->pin.range.end == r->pin.range.end));
- return l->pin.range.begin < r->pin.range.begin;
+ assert(l->range.end <= r->range.begin
+ || r->range.end <= l->range.begin
+ || (l->range.begin == r->range.begin
+ && l->range.end == r->range.end));
+ return l->range.begin < r->range.begin;
}
bool operator()(const node_key_t &l, const FixedKVNodeRef &r) const {
- return l < r->pin.range.begin;
+ return l < r->range.begin;
}
bool operator()(const FixedKVNodeRef &l, const node_key_t &r) const {
- return l->pin.range.begin < r;
+ return l->range.begin < r;
}
};
FixedKVNode(uint16_t capacity, ceph::bufferptr &&ptr)
: ChildableCachedExtent(std::move(ptr)),
- pin(this),
children(capacity, nullptr),
capacity(capacity) {}
FixedKVNode(const FixedKVNode &rhs)
: ChildableCachedExtent(rhs),
- pin(rhs.pin, this),
+ range(rhs.range),
children(rhs.capacity, nullptr),
capacity(rhs.capacity) {}
void set_parent_tracker_from_prior_instance() {
assert(is_mutation_pending());
auto &prior = (FixedKVNode&)(*get_prior_instance());
- if (pin.is_root()) {
+ if (range.is_root()) {
ceph_assert(prior.root_block);
ceph_assert(pending_for_transaction);
root_block = prior.root_block;
// All in-memory relative addrs are necessarily record-relative
assert(get_prior_instance());
assert(pending_for_transaction);
- pin.take_pin(get_prior_instance()->template cast<FixedKVNode>()->pin);
resolve_relative_addrs(record_block_offset);
}
void on_initial_write() final {
// All in-memory relative addrs are necessarily block-relative
resolve_relative_addrs(get_paddr());
- if (pin.is_root()) {
+ if (range.is_root()) {
reset_parent_tracker();
}
assert(has_parent_tracker() ? (is_parent_valid()) : true);
virtual ~FixedKVInternalNode() {
if (this->is_valid() && !this->is_pending()) {
- if (this->pin.is_root()) {
+ if (this->range.is_root()) {
ceph_assert(this->root_block);
unlink_phy_tree_root_node<NODE_KEY>(this->root_block);
} else {
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
this->split_child_ptrs(*left, *right);
auto pivot = this->split_into(*left, *right);
- left->pin.set_range(left->get_meta());
- right->pin.set_range(right->get_meta());
+ left->range = left->get_meta();
+ right->range = right->get_meta();
return std::make_tuple(
left,
right,
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
replacement->merge_child_ptrs(*this, *right);
replacement->merge_from(*this, *right->template cast<node_type_t>());
- replacement->pin.set_range(replacement->get_meta());
+ replacement->range = replacement->get_meta();
return replacement;
}
*replacement_left,
*replacement_right);
- replacement_left->pin.set_range(replacement_left->get_meta());
- replacement_right->pin.set_range(replacement_right->get_meta());
+ replacement_left->range = replacement_left->get_meta();
+ replacement_right->range = replacement_right->get_meta();
return std::make_tuple(
replacement_left,
replacement_right,
virtual ~FixedKVLeafNode() {
if (this->is_valid() && !this->is_pending()) {
- if (this->pin.is_root()) {
+ if (this->range.is_root()) {
ceph_assert(this->root_block);
unlink_phy_tree_root_node<NODE_KEY>(this->root_block);
} else {
this->split_child_ptrs(*left, *right);
}
auto pivot = this->split_into(*left, *right);
- left->pin.set_range(left->get_meta());
- right->pin.set_range(right->get_meta());
+ left->range = left->get_meta();
+ right->range = right->get_meta();
return std::make_tuple(
left,
right,
replacement->merge_child_ptrs(*this, *right);
}
replacement->merge_from(*this, *right->template cast<node_type_t>());
- replacement->pin.set_range(replacement->get_meta());
+ replacement->range = replacement->get_meta();
return replacement;
}
*replacement_right);
}
- replacement_left->pin.set_range(replacement_left->get_meta());
- replacement_right->pin.set_range(replacement_right->get_meta());
+ replacement_left->range = replacement_left->get_meta();
+ replacement_right->range = replacement_right->get_meta();
return std::make_tuple(
replacement_left,
replacement_right,
std::ostream &LogicalCachedExtent::_print_detail(std::ostream &out) const
{
out << ", laddr=" << laddr;
- if (pin) {
- out << ", pin=" << *pin;
- } else {
- out << ", pin=empty";
- }
return print_detail_l(out);
}
}
}
-std::ostream &operator<<(std::ostream &out, const LBAPin &rhs)
+std::ostream &operator<<(std::ostream &out, const LBAMapping &rhs)
{
- return out << "LBAPin(" << rhs.get_key() << "~" << rhs.get_length()
+ return out << "LBAMapping(" << rhs.get_key() << "~" << rhs.get_length()
<< "->" << rhs.get_val();
}
bool leaf_has_children>
class FixedKVBtree;
template <typename, typename>
-class BtreeNodePin;
+class BtreeNodeMapping;
// #define DEBUG_CACHED_EXTENT_REF
#ifdef DEBUG_CACHED_EXTENT_REF
friend class crimson::os::seastore::TransactionManager;
friend class crimson::os::seastore::ExtentPlacementManager;
template <typename, typename>
- friend class BtreeNodePin;
+ friend class BtreeNodeMapping;
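+  // let the btree_lba_manager_test unit test reach private state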
+ friend class ::btree_lba_manager_test;
};
std::ostream &operator<<(std::ostream &, CachedExtent::extent_state_t);
};
template <typename key_t, typename>
-class PhysicalNodePin;
+class PhysicalNodeMapping;
template <typename key_t, typename val_t>
-using PhysicalNodePinRef = std::unique_ptr<PhysicalNodePin<key_t, val_t>>;
+using PhysicalNodeMappingRef = std::unique_ptr<PhysicalNodeMapping<key_t, val_t>>;
template <typename key_t, typename val_t>
-class PhysicalNodePin {
+class PhysicalNodeMapping {
public:
- virtual void link_extent(LogicalCachedExtent *ref) = 0;
- virtual void take_pin(PhysicalNodePin<key_t, val_t> &pin) = 0;
virtual extent_len_t get_length() const = 0;
virtual extent_types_t get_type() const = 0;
virtual val_t get_val() const = 0;
virtual key_t get_key() const = 0;
- virtual PhysicalNodePinRef<key_t, val_t> duplicate() const = 0;
+ virtual PhysicalNodeMappingRef<key_t, val_t> duplicate() const = 0;
virtual bool has_been_invalidated() const = 0;
virtual CachedExtentRef get_parent() const = 0;
virtual uint16_t get_pos() const = 0;
child_pos->link_child(c);
}
- virtual ~PhysicalNodePin() {}
+ virtual ~PhysicalNodeMapping() {}
protected:
std::optional<child_pos_t> child_pos = std::nullopt;
};
-using LBAPin = PhysicalNodePin<laddr_t, paddr_t>;
-using LBAPinRef = PhysicalNodePinRef<laddr_t, paddr_t>;
+using LBAMapping = PhysicalNodeMapping<laddr_t, paddr_t>;
+using LBAMappingRef = PhysicalNodeMappingRef<laddr_t, paddr_t>;
-std::ostream &operator<<(std::ostream &out, const LBAPin &rhs);
+std::ostream &operator<<(std::ostream &out, const LBAMapping &rhs);
-using lba_pin_list_t = std::list<LBAPinRef>;
+using lba_pin_list_t = std::list<LBAMappingRef>;
std::ostream &operator<<(std::ostream &out, const lba_pin_list_t &rhs);
-using BackrefPin = PhysicalNodePin<paddr_t, laddr_t>;
-using BackrefPinRef = PhysicalNodePinRef<paddr_t, laddr_t>;
+using BackrefMapping = PhysicalNodeMapping<paddr_t, laddr_t>;
+using BackrefMappingRef = PhysicalNodeMappingRef<paddr_t, laddr_t>;
-using backref_pin_list_t = std::list<BackrefPinRef>;
+using backref_pin_list_t = std::list<BackrefMappingRef>;
/**
* RetiredExtentPlaceholder
: ChildableCachedExtent(std::forward<T>(t)...)
{}
- void set_pin(LBAPinRef &&npin) {
- assert(!pin);
- pin = std::move(npin);
- laddr = pin->get_key();
- pin->link_extent(this);
- }
-
- bool has_pin() const {
- return !!pin;
- }
-
- LBAPin &get_pin() {
- assert(pin);
- return *pin;
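+  // laddr stays L_ADDR_NULL until the extent is assigned an LBA mapping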
+ bool has_laddr() const {
+ return laddr != L_ADDR_NULL;
}
laddr_t get_laddr() const {
void on_delta_write(paddr_t record_block_offset) final {
assert(is_exist_mutation_pending() ||
get_prior_instance());
- if (get_prior_instance()) {
- pin->take_pin(*(get_prior_instance()->cast<LogicalCachedExtent>()->pin));
- }
logical_on_delta_write();
}
private:
laddr_t laddr = L_ADDR_NULL;
- LBAPinRef pin;
template <
typename node_key_t,
template <> struct fmt::formatter<crimson::os::seastore::lba_pin_list_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::CachedExtent> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::LogicalCachedExtent> : fmt::ostream_formatter {};
-template <> struct fmt::formatter<crimson::os::seastore::LBAPin> : fmt::ostream_formatter {};
+template <> struct fmt::formatter<crimson::os::seastore::LBAMapping> : fmt::ostream_formatter {};
#endif
*/
using get_mapping_iertr = base_iertr::extend<
crimson::ct_error::enoent>;
- using get_mapping_ret = get_mapping_iertr::future<LBAPinRef>;
+ using get_mapping_ret = get_mapping_iertr::future<LBAMappingRef>;
virtual get_mapping_ret get_mapping(
Transaction &t,
laddr_t offset) = 0;
*
* Offset will be relative to the block offset of the record
* This mapping will block from transaction submission until set_paddr
- * is called on the LBAPin.
+ * is called on the LBAMapping.
*/
using alloc_extent_iertr = base_iertr;
- using alloc_extent_ret = alloc_extent_iertr::future<LBAPinRef>;
+ using alloc_extent_ret = alloc_extent_iertr::future<LBAMappingRef>;
virtual alloc_extent_ret alloc_extent(
Transaction &t,
laddr_t hint,
Transaction &t,
laddr_t addr) = 0;
- virtual void complete_transaction(
- Transaction &t,
- std::vector<CachedExtentRef> &to_clear, ///< extents whose pins are to be cleared,
- // as the results of their retirements
- std::vector<CachedExtentRef> &to_link ///< fresh extents whose pins are to be inserted
- // into backref manager's pin set
- ) = 0;
-
/**
* Should be called after replay on each cached extent.
- * Implementation must initialize the LBAPin on any
+ * Implementation must initialize the LBAMapping on any
   * LogicalCachedExtents and may also read in any dependent
* structures, etc.
*
laddr_t laddr,
extent_len_t len) = 0;
- virtual void add_pin(LBAPin &pin) = 0;
-
virtual ~LBAManager() {}
};
using LBAManagerRef = std::unique_ptr<LBAManager>;
LOG_PREFIX(BtreeLBAManager::get_mapping);
TRACET("{}", t, offset);
auto c = get_context(t);
- return with_btree_ret<LBABtree, LBAPinRef>(
+ return with_btree_ret<LBABtree, LBAMappingRef>(
cache,
c,
[FNAME, c, offset](auto &btree) {
state.last_end,
lba_map_val_t{len, addr, 1, 0},
nextent
- ).si_then([&state, FNAME, c, addr, len, hint](auto &&p) {
+ ).si_then([&state, FNAME, c, addr, len, hint, nextent](auto &&p) {
auto [iter, inserted] = std::move(p);
TRACET("{}~{}, hint={}, inserted at {}",
c.trans, addr, len, hint, state.last_end);
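+      // a freshly allocated logical extent learns its laddr from the
+      // newly inserted position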
+ if (nextent) {
+ nextent->set_laddr(iter.get_key());
+ }
ceph_assert(inserted);
state.ret = iter;
});
return is_lba_node(e.get_type());
}
-btree_range_pin_t<laddr_t> &BtreeLBAManager::get_pin(
- CachedExtent &e)
-{
- if (is_lba_node(e)) {
- return e.cast<LBANode>()->pin;
- } else if (e.is_logical()) {
- return static_cast<BtreeLBAPin &>(
- e.cast<LogicalCachedExtent>()->get_pin()).get_range_pin();
- } else {
- ceph_abort_msg("impossible");
- }
-}
-
-static depth_t get_depth(const CachedExtent &e)
-{
- if (is_lba_node(e)) {
- return e.cast<LBANode>()->get_node_meta().depth;
- } else if (e.is_logical()) {
- return 0;
- } else {
- ceph_assert(0 == "currently impossible");
- return 0;
- }
-}
-
-void BtreeLBAManager::complete_transaction(
- Transaction &t,
- std::vector<CachedExtentRef> &to_clear,
- std::vector<CachedExtentRef> &to_link)
-{
- LOG_PREFIX(BtreeLBAManager::complete_transaction);
- DEBUGT("start", t);
- // need to call check_parent from leaf->parent
- std::sort(
- to_clear.begin(), to_clear.end(),
- [](auto &l, auto &r) { return get_depth(*l) < get_depth(*r); });
-
- for (auto &e: to_clear) {
- auto &pin = get_pin(*e);
- DEBUGT("retiring extent {} -- {}", t, pin, *e);
- pin_set.retire(pin);
- }
-
- std::sort(
- to_link.begin(), to_link.end(),
- [](auto &l, auto &r) -> bool { return get_depth(*l) > get_depth(*r); });
-
- for (auto &e : to_link) {
- DEBUGT("linking extent -- {}", t, *e);
- pin_set.add_pin(get_pin(*e));
- }
-
- for (auto &e: to_clear) {
- auto &pin = get_pin(*e);
- TRACET("checking extent {} -- {}", t, pin, *e);
- pin_set.check_parent(pin);
- }
-}
-
BtreeLBAManager::base_iertr::template future<>
_init_cached_extent(
op_context_t<laddr_t> c,
iter.get_val().paddr == logn->get_paddr()) {
assert(!iter.get_leaf_node()->is_pending());
iter.get_leaf_node()->link_child(logn.get(), iter.get_leaf_pos());
- logn->set_pin(iter.get_pin(c));
+ logn->set_laddr(iter.get_pin(c)->get_key());
ceph_assert(iter.get_val().len == e->get_length());
- if (c.pins) {
- c.pins->add_pin(
- static_cast<BtreeLBAPin&>(logn->get_pin()).get_range_pin());
- }
DEBUGT("logical extent {} live", c.trans, *logn);
ret = true;
} else {
namespace crimson::os::seastore::lba_manager::btree {
-class BtreeLBAPin : public BtreeNodePin<laddr_t, paddr_t> {
+class BtreeLBAMapping : public BtreeNodeMapping<laddr_t, paddr_t> {
public:
- BtreeLBAPin(op_context_t<laddr_t> ctx)
- : BtreeNodePin(ctx) {}
- BtreeLBAPin(
+ BtreeLBAMapping(op_context_t<laddr_t> ctx)
+ : BtreeNodeMapping(ctx) {}
+ BtreeLBAMapping(
op_context_t<laddr_t> c,
CachedExtentRef parent,
uint16_t pos,
lba_map_val_t &val,
lba_node_meta_t &&meta)
- : BtreeNodePin(
+ : BtreeNodeMapping(
c,
parent,
pos,
using LBABtree = FixedKVBtree<
laddr_t, lba_map_val_t, LBAInternalNode,
- LBALeafNode, BtreeLBAPin, LBA_BLOCK_SIZE, true>;
+ LBALeafNode, BtreeLBAMapping, LBA_BLOCK_SIZE, true>;
/**
* BtreeLBAManager
return update_refcount(t, addr, 1);
}
- void complete_transaction(
- Transaction &t,
- std::vector<CachedExtentRef> &,
- std::vector<CachedExtentRef> &) final;
-
/**
* init_cached_extent
*
paddr_t addr,
laddr_t laddr,
extent_len_t len) final;
-
- void add_pin(LBAPin &pin) final {
- auto *bpin = reinterpret_cast<BtreeLBAPin*>(&pin);
- pin_set.add_pin(bpin->get_range_pin());
- bpin->set_parent(nullptr);
- }
-
- ~BtreeLBAManager() {
- pin_set.scan([](auto &i) {
- LOG_PREFIX(BtreeLBAManager::~BtreeLBAManager);
- SUBERROR(seastore_lba, "Found {}, has_ref={} -- {}",
- i, i.has_ref(), i.get_extent());
- });
- }
private:
Cache &cache;
- btree_pin_set_t<laddr_t> pin_set;
struct {
uint64_t num_alloc_extents = 0;
} stats;
op_context_t<laddr_t> get_context(Transaction &t) {
- return op_context_t<laddr_t>{cache, t, &pin_set};
+ return op_context_t<laddr_t>{cache, t};
}
- static btree_range_pin_t<laddr_t> &get_pin(CachedExtent &e);
-
seastar::metrics::metric_group metrics;
void register_metrics();
std::optional<extent_to_write_t>,
std::optional<bufferptr>>;
using operate_ret = get_iertr::future<operate_ret_bare>;
-operate_ret operate_left(context_t ctx, LBAPinRef &pin, const overwrite_plan_t &overwrite_plan)
+operate_ret operate_left(context_t ctx, LBAMappingRef &pin, const overwrite_plan_t &overwrite_plan)
{
if (overwrite_plan.get_left_size() == 0) {
return get_iertr::make_ready_future<operate_ret_bare>(
*
* Proceed overwrite_plan.right_operation.
*/
-operate_ret operate_right(context_t ctx, LBAPinRef &pin, const overwrite_plan_t &overwrite_plan)
+operate_ret operate_right(context_t ctx, LBAMappingRef &pin, const overwrite_plan_t &overwrite_plan)
{
if (overwrite_plan.get_right_size() == 0) {
return get_iertr::make_ready_future<operate_ret_bare>(
backref_to_clear.push_back(e);
}
- // ...but add_pin from parent->leaf
- std::vector<CachedExtentRef> lba_to_link;
- std::vector<CachedExtentRef> backref_to_link;
- lba_to_link.reserve(tref.get_fresh_block_stats().num +
- tref.get_existing_block_stats().valid_num);
- backref_to_link.reserve(tref.get_fresh_block_stats().num);
- tref.for_each_fresh_block([&](auto &e) {
- if (e->is_valid()) {
- if (is_lba_node(e->get_type()) || e->is_logical())
- lba_to_link.push_back(e);
- else if (is_backref_node(e->get_type()))
- backref_to_link.push_back(e);
- }
- });
-
- for (auto &e: tref.get_existing_block_list()) {
- if (e->is_valid()) {
- lba_to_link.push_back(e);
- }
- }
-
- lba_manager->complete_transaction(tref, lba_to_clear, lba_to_link);
- backref_manager->complete_transaction(tref, backref_to_clear, backref_to_link);
-
journal->get_trimmer().update_journal_tails(
cache->get_oldest_dirty_from().value_or(start_seq),
cache->get_oldest_backref_dirty_from().value_or(start_seq));
lextent->get_length(),
nlextent->get_bptr().c_str());
nlextent->set_laddr(lextent->get_laddr());
- nlextent->set_pin(lextent->get_pin().duplicate());
nlextent->set_modify_time(lextent->get_modify_time());
DEBUGT("rewriting logical extent -- {} to {}", t, *lextent, *nlextent);
return trans_intr::parallel_for_each(
pin_list,
[=, this, &list, &t](
- LBAPinRef &pin) -> Cache::get_extent_iertr::future<>
+ LBAMappingRef &pin) -> Cache::get_extent_iertr::future<>
{
auto pin_paddr = pin->get_val();
auto &pin_seg_paddr = pin_paddr.as_seg_paddr();
* Get the logical pin at offset
*/
using get_pin_iertr = LBAManager::get_mapping_iertr;
- using get_pin_ret = LBAManager::get_mapping_iertr::future<LBAPinRef>;
+ using get_pin_ret = LBAManager::get_mapping_iertr::future<LBAMappingRef>;
get_pin_ret get_pin(
Transaction &t,
laddr_t offset) {
auto ret = cache->duplicate_for_write(
t,
ref)->cast<LogicalCachedExtent>();
- if (!ret->has_pin()) {
+ if (!ret->has_laddr()) {
SUBDEBUGT(seastore_tm,
"duplicating extent for write -- {} -> {}",
t,
*ref,
*ret);
- ret->set_pin(ref->get_pin().duplicate());
+ ret->set_laddr(ref->get_laddr());
} else {
SUBTRACET(seastore_tm,
"extent is already duplicated -- {}",
len,
ext->get_paddr(),
ext.get()
- ).si_then([ext=std::move(ext), laddr_hint, &t, FNAME](auto &&ref) mutable {
- ext->set_pin(std::move(ref));
+ ).si_then([ext=std::move(ext), laddr_hint, &t](auto &&) mutable {
+ LOG_PREFIX(TransactionManager::alloc_extent);
SUBDEBUGT(seastore_tm, "new extent: {}, laddr_hint: {}", t, *ext, laddr_hint);
return alloc_extent_iertr::make_ready_future<TCachedExtentRef<T>>(
std::move(ext));
ext.get()
).si_then([ext=std::move(ext), laddr_hint, this](auto &&ref) {
ceph_assert(laddr_hint == ref->get_key());
- ext->set_pin(std::move(ref));
return epm->read(
ext->get_paddr(),
ext->get_length(),
using reserve_extent_iertr = alloc_extent_iertr;
- using reserve_extent_ret = reserve_extent_iertr::future<LBAPinRef>;
+ using reserve_extent_ret = reserve_extent_iertr::future<LBAMappingRef>;
reserve_extent_ret reserve_region(
Transaction &t,
laddr_t hint,
assert(pin->get_parent());
assert(!pin->get_parent()->is_pending());
pin->link_child(&lextent);
- lextent.set_pin(std::move(pin));
- lba_manager->add_pin(lextent.get_pin());
+ lextent.set_laddr(pin->get_key());
}
).si_then([FNAME, &t](auto ref) {
SUBTRACET(seastore_tm, "got extent -- {}", t, *ref);
btree_lba_manager_test() = default;
- void complete_commit(Transaction &t) final {
- std::vector<CachedExtentRef> lba_to_clear;
- lba_to_clear.reserve(t.get_retired_set().size());
- for (auto &e: t.get_retired_set()) {
- if (e->is_logical() || is_lba_node(e->get_type()))
- lba_to_clear.push_back(e);
- }
- std::vector<CachedExtentRef> lba_to_link;
- lba_to_link.reserve(t.get_fresh_block_stats().num);
- t.for_each_fresh_block([&](auto &e) {
- if (e->is_valid() &&
- (is_lba_node(e->get_type()) || e->is_logical()))
- lba_to_link.push_back(e);
- });
-
- lba_manager->complete_transaction(t, lba_to_clear, lba_to_link);
- }
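+  // nothing to do: there is no pin-set bookkeeping to mirror on commit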
+ void complete_commit(Transaction &t) final {}
LBAManager::mkfs_ret test_structure_setup(Transaction &t) final {
lba_manager.reset(new BtreeLBAManager(*cache));
}
}
}
- std::list<LBAPinRef> get_mappings(objaddr_t offset, extent_len_t length) {
+ std::list<LBAMappingRef> get_mappings(objaddr_t offset, extent_len_t length) {
auto t = create_mutate_transaction();
auto ret = with_trans_intr(*t, [&](auto &t) {
return tm->get_pins(t, offset, length);