BtreeBackrefManager::scan_mapped_space_func_t &&f)
{
LOG_PREFIX(BtreeBackrefManager::scan_mapped_space);
- DEBUGT("start", t);
+ DEBUGT("scan backref tree", t);
auto c = get_context(t);
return seastar::do_with(
std::move(f),
- [this, c](auto &visitor) {
+ [this, c, FNAME](auto &scan_visitor)
+ {
+ auto block_size = cache.get_block_size();
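+ // wrap the caller's visitor for backref-tree nodes visited while walking
+ // the tree; tree nodes carry no logical address, so report L_ADDR_NULL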
+ BackrefBtree::mapped_space_visitor_t f =
+ [&scan_visitor, block_size, FNAME, c](
+ paddr_t paddr, extent_len_t len,
+ depth_t depth, extent_types_t type) {
+ TRACET("tree node {}~{} {}, depth={} used",
+ c.trans, paddr, len, type, depth);
+ ceph_assert(paddr.is_absolute());
+ ceph_assert(len > 0 && len % block_size == 0);
+ ceph_assert(depth >= 1);
+ ceph_assert(is_backref_node(type));
+ return scan_visitor(paddr, len, type, L_ADDR_NULL);
+ };
+ return seastar::do_with(
+ std::move(f),
+ [this, c, &scan_visitor, block_size, FNAME](auto &tree_visitor)
+ {
return with_btree<BackrefBtree>(
- cache,
- c,
- [c, &visitor](auto &btree) {
- return BackrefBtree::iterate_repeat(
- c,
- btree.lower_bound(
- c,
- paddr_t::make_seg_paddr(
- segment_id_t{0, 0}, 0),
- &visitor),
- [&visitor](auto &pos) {
- if (pos.is_end()) {
- return BackrefBtree::iterate_repeat_ret_inner(
- interruptible::ready_future_marker{},
- seastar::stop_iteration::yes);
- }
- visitor(pos.get_key(), pos.get_val().len, 0, pos.get_val().type);
- return BackrefBtree::iterate_repeat_ret_inner(
- interruptible::ready_future_marker{},
- seastar::stop_iteration::no);
- },
- &visitor);
- });
+ cache, c,
+ [c, &scan_visitor, &tree_visitor, block_size, FNAME](auto &btree)
+ {
+ return BackrefBtree::iterate_repeat(
+ c,
+ btree.lower_bound(
+ c,
+ paddr_t::make_seg_paddr(segment_id_t{0, 0}, 0),
+ &tree_visitor),
+ [c, &scan_visitor, block_size, FNAME](auto &pos) {
+ if (pos.is_end()) {
+ return BackrefBtree::iterate_repeat_ret_inner(
+ interruptible::ready_future_marker{},
+ seastar::stop_iteration::yes);
+ }
+ TRACET("tree value {}~{} {}~{} {} used",
+ c.trans,
+ pos.get_key(),
+ pos.get_val().len,
+ pos.get_val().laddr,
+ pos.get_val().len,
+ pos.get_val().type);
+ ceph_assert(pos.get_key().is_absolute());
+ ceph_assert(pos.get_val().len > 0 &&
+ pos.get_val().len % block_size == 0);
+ ceph_assert(!is_backref_node(pos.get_val().type));
+ ceph_assert(pos.get_val().laddr != L_ADDR_NULL);
+ scan_visitor(
+ pos.get_key(),
+ pos.get_val().len,
+ pos.get_val().type,
+ pos.get_val().laddr);
+ return BackrefBtree::iterate_repeat_ret_inner(
+ interruptible::ready_future_marker{},
+ seastar::stop_iteration::no);
+ },
+ &tree_visitor
+ );
+ });
+ }).si_then([this, &scan_visitor, c, FNAME, block_size] {
+ DEBUGT("scan backref cache", c.trans);
+ auto &backrefs = cache.get_backrefs();
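+ // also visit backref entries still cached in Cache and not yet merged
+ // into the tree; laddr == L_ADDR_NULL marks an extent that was freed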
+ for (auto &backref : backrefs) {
+ if (backref.laddr == L_ADDR_NULL) {
+ TRACET("backref entry {}~{} {} free",
+ c.trans,
+ backref.paddr,
+ backref.len,
+ backref.type);
+ } else {
+ TRACET("backref entry {}~{} {}~{} {} used",
+ c.trans,
+ backref.paddr,
+ backref.len,
+ backref.laddr,
+ backref.len,
+ backref.type);
+ }
+ ceph_assert(backref.paddr.is_absolute());
+ ceph_assert(backref.len > 0 &&
+ backref.len % block_size == 0);
+ ceph_assert(!is_backref_node(backref.type));
+ scan_visitor(
+ backref.paddr,
+ backref.len,
+ backref.type,
+ backref.laddr);
+ }
});
+ });
}
BtreeBackrefManager::base_iertr::future<> _init_cached_extent(
paddr_t offset) = 0;
/**
- * scan all extents, including backref extents, logical extents and lba extents,
+ * scan all extents in both the backref tree and the backref cache,
+ * including backref extents, logical extents and LBA extents,
* visit them with scan_mapped_space_func_t
*/
using scan_mapped_space_iertr = base_iertr;
using scan_mapped_space_ret = scan_mapped_space_iertr::future<>;
using scan_mapped_space_func_t = std::function<
- void(paddr_t, extent_len_t, depth_t, extent_types_t)>;
+ void(paddr_t, extent_len_t, extent_types_t, laddr_t)>;
virtual scan_mapped_space_ret scan_mapped_space(
Transaction &t,
scan_mapped_space_func_t &&f) = 0;
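Since the visitor no longer receives a depth_t, callers distinguish the three cases by extent type and laddr instead. A minimal caller sketch against the new signature (illustrative only; backref_manager and t mirror the mount() hunk below):

  return backref_manager->scan_mapped_space(
    t,
    [](paddr_t paddr, extent_len_t len, extent_types_t type, laddr_t laddr) {
      if (is_backref_node(type)) {
        // a backref-tree node; always reported with laddr == L_ADDR_NULL
      } else if (laddr == L_ADDR_NULL) {
        // a cached backref entry recording that this extent was freed
      } else {
        // a live logical/LBA extent currently mapped at laddr
      }
    });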
});
}).safe_then([this] {
return journal->open_for_mount();
- }).safe_then([this, FNAME](auto start_seq) {
+ }).safe_then([this](auto start_seq) {
async_cleaner->set_journal_head(start_seq);
return with_transaction_weak(
"mount",
- [this, FNAME](auto &t)
+ [this](auto &t)
{
return cache->init_cached_extents(t, [this](auto &t, auto &e) {
if (is_backref_node(e->get_type())) {
} else {
return lba_manager->init_cached_extent(t, e);
}
- }).si_then([this, FNAME, &t] {
+ }).si_then([this, &t] {
assert(async_cleaner->debug_check_space(
*async_cleaner->get_empty_space_tracker()));
return backref_manager->scan_mapped_space(
t,
- [this, FNAME, &t](
- paddr_t addr,
+ [this](
+ paddr_t paddr,
extent_len_t len,
- depth_t depth,
- extent_types_t type) {
- TRACET(
- "marking {}~{} used",
- t,
- addr,
- len);
- async_cleaner->mark_space_used(
- addr,
- len ,
- /* init_scan = */ true);
+ extent_types_t type,
+ laddr_t laddr) {
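+ // backref-tree nodes and live extents are marked used; entries with
+ // laddr == L_ADDR_NULL record freed extents and are marked free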
if (is_backref_node(type)) {
- ceph_assert(depth);
- backref_manager->cache_new_backref_extent(addr, type);
+ assert(laddr == L_ADDR_NULL);
+ backref_manager->cache_new_backref_extent(paddr, type);
cache->update_tree_extents_num(type, 1);
- return seastar::now();
+ async_cleaner->mark_space_used(paddr, len, true);
+ } else if (laddr == L_ADDR_NULL) {
+ cache->update_tree_extents_num(type, -1);
+ async_cleaner->mark_space_free(paddr, len, true);
} else {
- ceph_assert(!depth);
cache->update_tree_extents_num(type, 1);
- return seastar::now();
+ async_cleaner->mark_space_used(paddr, len, true);
}
});
- }).si_then([this, FNAME, &t] {
- auto &backrefs = backref_manager->get_cached_backrefs();
- DEBUGT("scan backref cache", t);
- for (auto &backref : backrefs) {
- if (backref.laddr == L_ADDR_NULL) {
- async_cleaner->mark_space_free(
- backref.paddr,
- backref.len,
- true);
- cache->update_tree_extents_num(backref.type, -1);
- } else {
- async_cleaner->mark_space_used(
- backref.paddr,
- backref.len,
- true);
- cache->update_tree_extents_num(backref.type, 1);
- }
- }
- return seastar::now();
});
});
}).safe_then([this] {
with_trans_intr(
*t.t,
[this, &tracker](auto &t) {
- return backref_manager->scan_mapped_space(
- t,
- [&tracker](auto offset, auto len, depth_t, extent_types_t) {
- if (offset.get_addr_type() == paddr_types_t::SEGMENT) {
- logger().debug("check_usage: tracker alloc {}~{}",
- offset, len);
- tracker->allocate(
- offset.as_seg_paddr().get_segment_id(),
- offset.as_seg_paddr().get_segment_off(),
- len);
- }
- }).si_then([&tracker, this] {
- auto &backrefs = backref_manager->get_cached_backrefs();
- for (auto &backref : backrefs) {
- if (backref.paddr.get_addr_type() == paddr_types_t::SEGMENT) {
- if (backref.laddr == L_ADDR_NULL) {
- tracker->release(
- backref.paddr.as_seg_paddr().get_segment_id(),
- backref.paddr.as_seg_paddr().get_segment_off(),
- backref.len);
- } else {
- tracker->allocate(
- backref.paddr.as_seg_paddr().get_segment_id(),
- backref.paddr.as_seg_paddr().get_segment_off(),
- backref.len);
- }
- }
- }
- return seastar::now();
- });
- }).unsafe_get0();
+ return backref_manager->scan_mapped_space(
+ t,
+ [&tracker](
+ paddr_t paddr,
+ extent_len_t len,
+ extent_types_t type,
+ laddr_t laddr) {
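+ // allocate segment space in the tracker for backref-tree nodes and
+ // live extents; release space for freed extents (laddr == L_ADDR_NULL)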
+ if (paddr.get_addr_type() == paddr_types_t::SEGMENT) {
+ if (is_backref_node(type)) {
+ assert(laddr == L_ADDR_NULL);
+ tracker->allocate(
+ paddr.as_seg_paddr().get_segment_id(),
+ paddr.as_seg_paddr().get_segment_off(),
+ len);
+ } else if (laddr == L_ADDR_NULL) {
+ tracker->release(
+ paddr.as_seg_paddr().get_segment_id(),
+ paddr.as_seg_paddr().get_segment_off(),
+ len);
+ } else {
+ tracker->allocate(
+ paddr.as_seg_paddr().get_segment_id(),
+ paddr.as_seg_paddr().get_segment_off(),
+ len);
+ }
+ }
+ });
+ }).unsafe_get0();
return async_cleaner->debug_check_space(*tracker);
}