});
}
-BtreeBackrefManager::check_child_trackers_ret
-BtreeBackrefManager::check_child_trackers(
- Transaction &t) {
- auto c = get_context(t);
- return with_btree<BackrefBtree>(
- cache, c,
- [c](auto &btree) {
- return btree.check_child_trackers(c);
- });
-}
-
BtreeBackrefManager::scan_mapped_space_ret
BtreeBackrefManager::scan_mapped_space(
Transaction &t,
Transaction &t,
paddr_t offset) final;
- check_child_trackers_ret check_child_trackers(Transaction &t) final;
-
scan_mapped_space_ret scan_mapped_space(
Transaction &t,
scan_mapped_space_func_t &&f) final;
Transaction &t,
paddr_t offset) = 0;
- using check_child_trackers_ret = base_iertr::future<>;
- virtual check_child_trackers_ret check_child_trackers(Transaction &t) = 0;
-
/**
* scan all extents in both tree and cache,
* including backref extents, logical extents and lba extents.
*/
CachedExtentRef parent;
- val_t value;
+ pladdr_t value;
extent_len_t len;
fixed_kv_node_meta_t<key_t> range;
uint16_t pos = std::numeric_limits<uint16_t>::max();
+ pladdr_t _get_val() const final {
+ return value;
+ }
+
public:
using val_type = val_t;
BtreeNodeMapping(op_context_t<key_t> ctx) : ctx(ctx) {}
op_context_t<key_t> ctx,
CachedExtentRef parent,
uint16_t pos,
- val_t &value,
+ pladdr_t value,
extent_len_t len,
fixed_kv_node_meta_t<key_t> &&meta)
: ctx(ctx),
if constexpr (
std::is_same_v<crimson::os::seastore::lba_manager::btree::lba_map_val_t,
node_val_t>) {
- ret.paddr = ret.paddr.maybe_relative_to(leaf.node->get_paddr());
+ if (ret.pladdr.is_paddr()) {
+ ret.pladdr = ret.pladdr.get_paddr().maybe_relative_to(
+ leaf.node->get_paddr());
+ }
}
return ret;
}
return upper_bound(c, min_max_t<node_key_t>::max);
}
- template <typename child_node_t, typename node_t>
+ template <typename child_node_t, typename node_t, bool lhc = leaf_has_children,
+ typename std::enable_if<lhc, int>::type = 0>
void check_node(
op_context_t<node_key_t> c,
TCachedExtentRef<node_t> node)
{
+ assert(leaf_has_children);
for (auto i : *node) {
CachedExtentRef child_node;
Transaction::get_extent_ret ret;
i->get_val().maybe_relative_to(node->get_paddr()),
&child_node);
} else {
- if constexpr (leaf_has_children) {
- ret = c.trans.get_extent(
- i->get_val().paddr.maybe_relative_to(node->get_paddr()),
- &child_node);
- }
+ assert(i->get_val().pladdr.is_paddr());
+ ret = c.trans.get_extent(
+ i->get_val().pladdr.get_paddr().maybe_relative_to(node->get_paddr()),
+ &child_node);
}
if (ret == Transaction::get_extent_ret::PRESENT) {
if (child_node->is_stable()) {
assert(!c.cache.query_cache(i->get_val(), nullptr));
} else {
if constexpr (leaf_has_children) {
- assert(!c.cache.query_cache(i->get_val().paddr, nullptr));
+ assert(i->get_val().pladdr.is_paddr()
+ ? (bool)!c.cache.query_cache(
+ i->get_val().pladdr.get_paddr(), nullptr)
+ : true);
}
}
}
}
using check_child_trackers_ret = base_iertr::future<>;
+ template <bool lhc = leaf_has_children,
+ typename std::enable_if<lhc, int>::type = 0>
check_child_trackers_ret check_child_trackers(
op_context_t<node_key_t> c) {
mapped_space_visitor_t checker = [c, this](
c,
*state.insert_iter,
state.last_end,
- lba_map_val_t{len, addr, 1, 0},
+ lba_map_val_t{len, pladdr_t(addr), 1, 0},
nextent
).si_then([&state, FNAME, c, addr, len, hint, nextent](auto &&p) {
auto [iter, inserted] = std::move(p);
LOG_PREFIX(BtreeLBAManager::init_cached_extent);
if (!iter.is_end() &&
iter.get_key() == logn->get_laddr() &&
- iter.get_val().paddr == logn->get_paddr()) {
+ iter.get_val().pladdr.is_paddr() &&
+ iter.get_val().pladdr.get_paddr() == logn->get_paddr()) {
assert(!iter.get_leaf_node()->is_pending());
iter.get_leaf_node()->link_child(logn.get(), iter.get_leaf_pos());
logn->set_laddr(iter.get_pin(c)->get_key());
seastar::stop_iteration::yes);
}
ceph_assert((pos.get_key() + pos.get_val().len) > begin);
- f(pos.get_key(), pos.get_val().paddr, pos.get_val().len);
+ f(pos.get_key(), pos.get_val().pladdr.get_paddr(), pos.get_val().len);
return typename LBABtree::iterate_repeat_ret_inner(
interruptible::ready_future_marker{},
seastar::stop_iteration::no);
const lba_map_val_t &in) {
assert(!addr.is_null());
lba_map_val_t ret = in;
- ceph_assert(in.paddr == prev_addr);
- ret.paddr = addr;
+ ceph_assert(in.pladdr.is_paddr());
+ ceph_assert(in.pladdr.get_paddr() == prev_addr);
+ ret.pladdr = addr;
return ret;
},
nextent
DEBUGT("laddr={}, delta={} done -- {}", t, addr, delta, result);
return ref_update_result_t{
result.refcount,
- result.paddr,
+ result.pladdr,
result.len
};
});
c,
parent,
pos,
- val.paddr,
+ val.pladdr,
val.len,
std::forward<lba_node_meta_t>(meta))
{}
std::ostream& operator<<(std::ostream& out, const lba_map_val_t& v)
{
return out << "lba_map_val_t("
- << v.paddr
+ << v.pladdr
<< "~" << v.len
<< ", refcount=" << v.refcount
<< ", checksum=" << v.checksum
{
LOG_PREFIX(LBALeafNode::resolve_relative_addrs);
for (auto i: *this) {
- if (i->get_val().paddr.is_relative()) {
- auto val = i->get_val();
- val.paddr = base.add_relative(val.paddr);
- TRACE("{} -> {}", i->get_val().paddr, val.paddr);
+ auto val = i->get_val();
+ if (val.pladdr.is_paddr() &&
+ val.pladdr.get_paddr().is_relative()) {
+ val.pladdr = base.add_relative(val.pladdr.get_paddr());
+ TRACE("{} -> {}", i->get_val().pladdr, val.pladdr);
i->set_val(val);
}
}
*/
struct lba_map_val_t {
extent_len_t len = 0; ///< length of mapping
- paddr_t paddr; ///< physical addr of mapping
+ pladdr_t pladdr; ///< physical addr of mapping, or
+ ///< laddr of a physical lba mapping (see btree_lba_manager.h)
uint32_t refcount = 0; ///< refcount
uint32_t checksum = 0; ///< checksum of original block written at paddr (TODO)
lba_map_val_t() = default;
lba_map_val_t(
extent_len_t len,
- paddr_t paddr,
+ pladdr_t pladdr,
uint32_t refcount,
uint32_t checksum)
- : len(len), paddr(paddr), refcount(refcount), checksum(checksum) {}
+ : len(len), pladdr(pladdr), refcount(refcount), checksum(checksum) {}
bool operator==(const lba_map_val_t&) const = default;
};
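With pladdr_t in place of paddr_t, a mapping value can describe either a direct mapping (backed by a physical address) or an indirect one (pointing at the laddr of another lba mapping). The following is a minimal sketch of the two states, added for illustration only and not part of the patch; it assumes the crimson::os::seastore headers and namespace, the helper name is the editor's, and the length 4096 and laddr 0x1000 are arbitrary example values with P_ADDR_NULL standing in for a real physical address.

// Editor's illustration, not part of the patch.
void example_map_vals() {
  // Direct mapping: pladdr carries a paddr_t.
  lba_map_val_t direct{4096, pladdr_t(P_ADDR_NULL), 1, 0};
  assert(direct.pladdr.is_paddr());

  // Indirect mapping: pladdr carries the laddr of a physical lba mapping.
  lba_map_val_t indirect{4096, pladdr_t(laddr_t(0x1000)), 1, 0};
  assert(indirect.pladdr.is_laddr());
  assert(indirect.pladdr.get_laddr() == laddr_t(0x1000));
}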
* size : uint32_t[1] 4b
* (padding) : 4b
* meta : lba_node_meta_le_t[3] (1*24)b
- * keys : laddr_t[170] (145*8)b
- * values : lba_map_val_t[170] (145*20)b
+ * keys : laddr_t[140] (140*8)b
+ * values : lba_map_val_t[140] (140*21)b
* = 4092
*
* TODO: update FixedKVNodeLayout to handle the above calculation
* TODO: the above alignment probably isn't portable without further work
*/
-constexpr size_t LEAF_NODE_CAPACITY = 145;
+constexpr size_t LEAF_NODE_CAPACITY = 140;
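The new capacity follows from the encoded value size: pladdr_le_t packs an 8-byte address plus a 1-byte type tag, so lba_map_val_le_t grows from 20 to 4 + 9 + 4 + 4 = 21 bytes. A 4096-byte node with 4 + 4 + 24 = 32 bytes of header therefore holds floor(4064 / (8 + 21)) = 140 key/value pairs, i.e. 32 + 140 * 29 = 4092 bytes, matching the layout comment above. A compile-time sanity check, as an editor's sketch only (the EX_* constant names are not from the patch):

// Editor's sketch, not part of the patch: capacity arithmetic behind
// LEAF_NODE_CAPACITY, assuming the key/value sizes from the layout comment.
static_assert(sizeof(pladdr_le_t) == 9, "packed 8-byte address + 1-byte tag");
constexpr size_t EX_NODE_BYTES = 4096;
constexpr size_t EX_HEADER_BYTES = 4 /*size*/ + 4 /*padding*/ + 24 /*meta*/;
constexpr size_t EX_ENTRY_BYTES = 8 /*laddr_le_t key*/ + 21 /*lba_map_val_le_t*/;
static_assert((EX_NODE_BYTES - EX_HEADER_BYTES) / EX_ENTRY_BYTES == 140);
static_assert(EX_HEADER_BYTES + 140 * EX_ENTRY_BYTES == 4092);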
/**
* lba_map_val_le_t
*/
struct lba_map_val_le_t {
extent_len_le_t len = init_extent_len_le(0);
- paddr_le_t paddr;
+ pladdr_le_t pladdr;
ceph_le32 refcount{0};
ceph_le32 checksum{0};
lba_map_val_le_t(const lba_map_val_le_t &) = default;
explicit lba_map_val_le_t(const lba_map_val_t &val)
: len(init_extent_len_le(val.len)),
- paddr(paddr_le_t(val.paddr)),
+ pladdr(pladdr_le_t(val.pladdr)),
refcount(val.refcount),
checksum(val.checksum) {}
operator lba_map_val_t() const {
- return lba_map_val_t{ len, paddr, refcount, checksum };
+ return lba_map_val_t{ len, pladdr, refcount, checksum };
}
};
// child-ptr may already be correct, see LBAManager::update_mappings()
this->update_child_ptr(iter, nextent);
}
- val.paddr = this->maybe_generate_relative(val.paddr);
+ if (val.pladdr.is_paddr()) {
+ val.pladdr = maybe_generate_relative(val.pladdr.get_paddr());
+ }
return this->journal_update(
iter,
val,
addr,
(void*)nextent);
this->insert_child_ptr(iter, nextent);
- val.paddr = this->maybe_generate_relative(val.paddr);
+ if (val.pladdr.is_paddr()) {
+ val.pladdr = maybe_generate_relative(val.pladdr.get_paddr());
+ }
this->journal_insert(
iter,
addr,
if (this->is_initial_pending()) {
for (auto i = from; i != to; ++i) {
auto val = i->get_val();
- if (val.paddr.is_relative()) {
- assert(val.paddr.is_block_relative());
- val.paddr = this->get_paddr().add_relative(val.paddr);
+ if (val.pladdr.is_paddr()
+ && val.pladdr.get_paddr().is_relative()) {
+ assert(val.pladdr.get_paddr().is_block_relative());
+ val.pladdr = this->get_paddr().add_relative(val.pladdr.get_paddr());
i->set_val(val);
}
}
if (this->is_initial_pending()) {
for (auto i = from; i != to; ++i) {
auto val = i->get_val();
- if (val.paddr.is_relative()) {
- auto val = i->get_val();
- assert(val.paddr.is_record_relative());
- val.paddr = val.paddr.block_relative_to(this->get_paddr());
+ if (val.pladdr.is_paddr()
+ && val.pladdr.get_paddr().is_relative()) {
+ assert(val.pladdr.get_paddr().is_record_relative());
+ val.pladdr = val.pladdr.get_paddr().block_relative_to(this->get_paddr());
i->set_val(val);
}
}
}
}
+std::ostream &operator<<(std::ostream &out, const pladdr_t &pladdr)
+{
+ if (pladdr.is_laddr()) {
+ return out << pladdr.get_laddr();
+ } else {
+ return out << pladdr.get_paddr();
+ }
+}
+
std::ostream &operator<<(std::ostream &out, const paddr_t &rhs)
{
auto id = rhs.get_device_id();
struct seg_paddr_t;
struct blk_paddr_t;
struct res_paddr_t;
+struct pladdr_t;
struct paddr_t {
public:
// P_ADDR_MAX == P_ADDR_NULL == paddr_t{}
static_cast<u_device_off_t>(offset)) {}
friend struct paddr_le_t;
+ friend struct pladdr_le_t;
+
};
std::ostream &operator<<(std::ostream &out, const paddr_t &rhs);
}
};
+constexpr uint64_t PL_ADDR_NULL = std::numeric_limits<uint64_t>::max();
+
+struct pladdr_t {
+ std::variant<laddr_t, paddr_t> pladdr;
+
+ pladdr_t() = default;
+ pladdr_t(const pladdr_t &) = default;
+ explicit pladdr_t(laddr_t laddr)
+ : pladdr(laddr) {}
+ explicit pladdr_t(paddr_t paddr)
+ : pladdr(paddr) {}
+
+ bool is_laddr() const {
+ return pladdr.index() == 0;
+ }
+
+ bool is_paddr() const {
+ return pladdr.index() == 1;
+ }
+
+ pladdr_t& operator=(paddr_t paddr) {
+ pladdr = paddr;
+ return *this;
+ }
+
+ pladdr_t& operator=(laddr_t laddr) {
+ pladdr = laddr;
+ return *this;
+ }
+
+ bool operator==(const pladdr_t &) const = default;
+
+ paddr_t get_paddr() const {
+ assert(pladdr.index() == 1);
+ return paddr_t(std::get<1>(pladdr));
+ }
+
+ laddr_t get_laddr() const {
+ assert(pladdr.index() == 0);
+ return laddr_t(std::get<0>(pladdr));
+ }
+
+};
+
+std::ostream &operator<<(std::ostream &out, const pladdr_t &pladdr);
+
+enum class addr_type_t : uint8_t {
+ PADDR=0,
+ LADDR=1,
+ MAX=2 // or NONE
+};
+
+struct __attribute((packed)) pladdr_le_t {
+ ceph_le64 pladdr = ceph_le64(PL_ADDR_NULL);
+ addr_type_t addr_type = addr_type_t::MAX;
+
+ pladdr_le_t() = default;
+ pladdr_le_t(const pladdr_le_t &) = default;
+ explicit pladdr_le_t(const pladdr_t &addr)
+ : pladdr(
+ ceph_le64(
+ addr.is_laddr() ?
+ std::get<0>(addr.pladdr) :
+ std::get<1>(addr.pladdr).internal_paddr)),
+ addr_type(
+ addr.is_laddr() ?
+ addr_type_t::LADDR :
+ addr_type_t::PADDR)
+ {}
+
+ operator pladdr_t() const {
+ if (addr_type == addr_type_t::LADDR) {
+ return pladdr_t(laddr_t(pladdr));
+ } else {
+ assert(addr_type == addr_type_t::PADDR);
+ return pladdr_t(paddr_t(pladdr));
+ }
+ }
+};
+
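To show how the one-byte addr_type tag drives encoding and decoding, here is a round-trip sketch for both pladdr_t states. This is an editor's illustration only, assuming the declarations above are in scope; the helper name and the laddr value 0x2000 are the editor's, and P_ADDR_NULL stands in for a real physical address.

// Editor's illustration, not part of the patch.
void example_pladdr_roundtrip() {
  pladdr_t direct(P_ADDR_NULL);              // variant holds a paddr_t
  pladdr_le_t direct_le(direct);             // encoded with addr_type_t::PADDR
  assert(pladdr_t(direct_le) == direct);     // decoded via operator pladdr_t()

  pladdr_t indirect(laddr_t(0x2000));        // variant holds an laddr_t
  pladdr_le_t indirect_le(indirect);         // encoded with addr_type_t::LADDR
  assert(pladdr_t(indirect_le).is_laddr());
  assert(pladdr_t(indirect_le).get_laddr() == laddr_t(0x2000));
}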
// logical offset, see LBAManager, TransactionManager
using extent_len_t = uint32_t;
constexpr extent_len_t EXTENT_LEN_MAX =
template <> struct fmt::formatter<crimson::os::seastore::omap_root_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::paddr_list_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::paddr_t> : fmt::ostream_formatter {};
+template <> struct fmt::formatter<crimson::os::seastore::pladdr_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::placement_hint_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::device_type_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::record_group_header_t> : fmt::ostream_formatter {};
}
static auto get_map_val(extent_len_t len) {
- return lba_map_val_t{0, P_ADDR_NULL, len, 0};
+ return lba_map_val_t{0, (pladdr_t)P_ADDR_NULL, len, 0};
}
device_off_t next_off = 0;