#include "crimson/os/seastore/object_data_handler.h"
#include "crimson/os/seastore/collection_manager/collection_flat_node.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/node_extent_manager/seastore.h"
+#include "crimson/os/seastore/backref/backref_tree_node.h"
#include "test/crimson/seastore/test_block.h"
using std::string_view;
// Transaction is now a go, set up in-memory cache state
// invalidate now invalid blocks
io_stat_t retire_stat;
+ std::vector<alloc_delta_t> alloc_deltas;
+ alloc_delta_t rel_delta;
+ rel_delta.op = alloc_delta_t::op_types_t::CLEAR;
for (auto &i: t.retired_set) {
get_by_ext(efforts.retire_by_ext,
i->get_type()).increment(i->get_length());
retire_stat.increment(i->get_length());
DEBUGT("retired and remove extent -- {}", t, *i);
commit_retire_extent(t, i);
- // FIXME: whether the extent belongs to RBM should be available through its
- // device-id from its paddr after RBM is properly integrated.
- /*
- if (i belongs to RBM) {
- paddr_t paddr = i->get_paddr();
- alloc_delta_t delta;
- delta.op = alloc_delta_t::op_types_t::CLEAR;
- delta.alloc_blk_ranges.push_back(std::make_pair(paddr, i->get_length()));
- t.add_rbm_alloc_info_blocks(delta);
+ if (is_backref_mapped_extent_node(i)
+ || is_retired_placeholder(i->get_type())) {
+ rel_delta.alloc_blk_ranges.emplace_back(
+ i->get_paddr(),
+ L_ADDR_NULL,
+ i->get_length(),
+ i->get_type());
}
- */
}
+ alloc_deltas.emplace_back(std::move(rel_delta));
record.extents.reserve(t.inline_block_list.size());
io_stat_t fresh_stat;
io_stat_t fresh_invalid_stat;
+ alloc_delta_t alloc_delta;
+ alloc_delta.op = alloc_delta_t::op_types_t::SET;
for (auto &i: t.inline_block_list) {
if (!i->is_valid()) {
DEBUGT("invalid fresh inline extent -- {}", t, *i);
std::move(bl),
i->get_last_modified().time_since_epoch().count()
});
- }
-
- for (auto b : t.rbm_alloc_info_blocks) {
- bufferlist bl;
- encode(b, bl);
- delta_info_t delta;
- delta.type = extent_types_t::RBM_ALLOC_INFO;
- delta.bl = bl;
- record.push_back(std::move(delta));
+ if (i->is_valid()
+ && is_backref_mapped_extent_node(i)) {
+ alloc_delta.alloc_blk_ranges.emplace_back(
+ i->get_paddr(),
+ i->is_logical()
+ ? i->cast<LogicalCachedExtent>()->get_laddr()
+ : (is_lba_node(i->get_type())
+ ? i->cast<lba_manager::btree::LBANode>()->get_node_meta().begin
+ : L_ADDR_NULL),
+ i->get_length(),
+ i->get_type());
+ }
}
for (auto &i: t.ool_block_list) {
assert(!i->is_inline());
get_by_ext(efforts.fresh_ool_by_ext,
i->get_type()).increment(i->get_length());
+ if (is_backref_mapped_extent_node(i)) {
+ alloc_delta.alloc_blk_ranges.emplace_back(
+ i->get_paddr(),
+ i->is_logical()
+ ? i->cast<LogicalCachedExtent>()->get_laddr()
+ : i->cast<lba_manager::btree::LBANode>()->get_node_meta().begin,
+ i->get_length(),
+ i->get_type());
+ }
+ }
+ alloc_deltas.emplace_back(std::move(alloc_delta));
+
+ for (auto b : alloc_deltas) {
+ bufferlist bl;
+ encode(b, bl);
+ delta_info_t delta;
+ delta.type = extent_types_t::ALLOC_INFO;
+ delta.bl = bl;
+ record.push_back(std::move(delta));
}
ceph_assert(t.get_fresh_block_stats().num ==
type(type),
seq(seq)
{}
+ // Converting constructor: build a backref buffer entry directly from a
+ // journal alloc-info delta record (alloc_blk_t), copying its
+ // paddr/laddr/len/type fields one-to-one.  `seq`/`type` tracking fields
+ // (if any — enclosing struct not fully visible here) keep their defaults.
+ backref_buf_entry_t(alloc_blk_t alloc_blk)
+   : paddr(alloc_blk.paddr),
+     laddr(alloc_blk.laddr),
+     len(alloc_blk.len),
+     type(alloc_blk.type)
+ {}
const paddr_t paddr = P_ADDR_NULL;
const laddr_t laddr = L_ADDR_NULL;
const extent_len_t len = 0;
#endif
+// Returns true if this extent's physical allocation should be recorded in
+// the journal's alloc_delta_t records at transaction commit (see the
+// retired_set / inline_block_list / ool_block_list loops in Cache commit):
+// logical extents, LBA-tree nodes, and TEST_BLOCK_PHYSICAL (test-only
+// physical extents used by the seastore unit tests).
+bool is_backref_mapped_extent_node(const CachedExtentRef &extent) {
+  return extent->is_logical()
+    || is_lba_node(extent->get_type())
+    || extent->get_type() == extent_types_t::TEST_BLOCK_PHYSICAL;
+}
+
std::ostream &operator<<(std::ostream &out, CachedExtent::extent_state_t state)
{
switch (state) {
std::ostream &operator<<(std::ostream &, CachedExtent::extent_state_t);
std::ostream &operator<<(std::ostream &, const CachedExtent&);
+bool is_backref_mapped_extent_node(const CachedExtentRef &extent);
+
/// Compare extents by paddr
struct paddr_cmp {
bool operator()(paddr_t lhs, const CachedExtent &rhs) const {
*
*/
return find_free_block(t, size
- ).safe_then([this, &t] (auto alloc_extent) mutable
+ ).safe_then([this] (auto alloc_extent) mutable
-> allocate_ertr::future<paddr_t> {
logger().debug("after find_free_block: allocated {}", alloc_extent);
- if (!alloc_extent.empty()) {
- alloc_delta_t alloc_info;
- for (auto p : alloc_extent) {
- paddr_t paddr = convert_abs_addr_to_paddr(
- p.first * super.block_size,
- super.device_id);
- size_t len = p.second * super.block_size;
- alloc_info.alloc_blk_ranges.push_back(std::make_pair(paddr, len));
- alloc_info.op = alloc_delta_t::op_types_t::SET;
- }
- t.add_alloc_info_blocks(std::move(alloc_info));
- } else {
+ if (alloc_extent.empty()) {
return crimson::ct_error::enospc::make();
}
paddr_t paddr = convert_abs_addr_to_paddr(
from,
super.device_id);
alloc_delta_t alloc_info;
- alloc_info.alloc_blk_ranges.push_back(std::make_pair(paddr, len));
+ alloc_info.alloc_blk_ranges.emplace_back(
+ paddr, L_ADDR_NULL, len, extent_types_t::ROOT);
alloc_info.op = alloc_delta_t::op_types_t::CLEAR;
v.push_back(alloc_info);
}
[this](auto &alloc) {
return crimson::do_for_each(alloc.alloc_blk_ranges,
[this, &alloc] (auto &range) -> write_ertr::future<> {
- logger().debug("range {} ~ {}", range.first, range.second);
+ logger().debug("range {} ~ {}", range.paddr, range.len);
bitmap_op_types_t op =
(alloc.op == alloc_delta_t::op_types_t::SET) ?
bitmap_op_types_t::ALL_SET :
bitmap_op_types_t::ALL_CLEAR;
rbm_abs_addr addr = convert_paddr_to_abs_addr(
- range.first);
+ range.paddr);
blk_no_t start = addr / super.block_size;
blk_no_t end = start +
- (round_up_to(range.second, super.block_size)) / super.block_size
+ (round_up_to(range.len, super.block_size)) / super.block_size
- 1;
return rbm_sync_block_bitmap_by_range(
start,
for (auto r : b.alloc_blk_ranges) {
if (b.op == alloc_delta_t::op_types_t::SET) {
alloc_block_count +=
- round_up_to(r.second, super.block_size) / super.block_size;
+ round_up_to(r.len, super.block_size) / super.block_size;
logger().debug(" complete alloc block: start {} len {} ",
- r.first, r.second);
+ r.paddr, r.len);
} else {
alloc_block_count -=
- round_up_to(r.second, super.block_size) / super.block_size;
+ round_up_to(r.len, super.block_size) / super.block_size;
logger().debug(" complete alloc block: start {} len {} ",
- r.first, r.second);
+ r.paddr, r.len);
}
}
}
}
};
+// One allocated (or released) block range carried inside an alloc_delta_t
+// journal record: physical address, the backref logical address
+// (L_ADDR_NULL for extents with no logical mapping, e.g. retired ranges),
+// the range length, and the extent type that owns the range.
+struct alloc_blk_t {
+  alloc_blk_t(
+    paddr_t paddr,
+    laddr_t laddr,
+    extent_len_t len,
+    extent_types_t type)
+    : paddr(paddr), laddr(laddr), len(len), type(type)
+  {}
+
+  // Default-constructed entry (needed by denc decode); fields below give
+  // the "empty" values.
+  explicit alloc_blk_t() = default;
+
+  paddr_t paddr = P_ADDR_NULL;
+  laddr_t laddr = L_ADDR_NULL;
+  extent_len_t len = 0;
+  // NOTE(review): ROOT appears to serve as the "unspecified" default type
+  // here (callers pass ROOT for raw RBM ranges) — confirm against users.
+  extent_types_t type = extent_types_t::ROOT;
+  // DENC serializer, versioned 1/1: fields encoded in declaration order.
+  DENC(alloc_blk_t, v, p) {
+    DENC_START(1, 1, p);
+    denc(v.paddr, p);
+    denc(v.laddr, p);
+    denc(v.len, p);
+    denc(v.type, p);
+    DENC_FINISH(p);
+  }
+};
+
// use absolute address
struct alloc_delta_t {
enum class op_types_t : uint8_t {
SET = 1,
CLEAR = 2
};
- std::vector<std::pair<paddr_t, size_t>> alloc_blk_ranges;
+ std::vector<alloc_blk_t> alloc_blk_ranges;
op_types_t op = op_types_t::NONE;
alloc_delta_t() = default;
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::record_group_header_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::extent_info_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::segment_header_t)
+WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::alloc_blk_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::alloc_delta_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::segment_tail_t)
p.first * block_size,
rbm_manager->get_device_id());
size_t len = p.second * block_size;
- alloc_info.alloc_blk_ranges.push_back(std::make_pair(paddr, len));
- alloc_info.op = rbm_alloc_delta_t::op_types_t::SET;
+ alloc_info.alloc_blk_ranges.emplace_back(
+ paddr, L_ADDR_NULL, len, extent_types_t::ROOT);
+ alloc_info.op = alloc_delta_t::op_types_t::SET;
}
t.add_rbm_allocated_blocks(alloc_info);
}
for (auto p : allocated_blocks) {
for (auto b : p.alloc_blk_ranges) {
rbm_abs_addr addr =
- convert_paddr_to_abs_addr(b.first);
- alloc_ids.insert(addr / block_size, b.second / block_size);
+ convert_paddr_to_abs_addr(b.paddr);
+ alloc_ids.insert(addr / block_size, b.len / block_size);
}
}
logger().debug(" get allocated blockid {}", alloc_ids);