fixed_kv_extent.get_user_hint(),
// get target rewrite generation
fixed_kv_extent.get_rewrite_generation());
- fixed_kv_extent.get_bptr().copy_out(
- 0,
- fixed_kv_extent.get_length(),
- n_fixed_kv_extent->get_bptr().c_str());
- n_fixed_kv_extent->set_modify_time(fixed_kv_extent.get_modify_time());
- n_fixed_kv_extent->range = n_fixed_kv_extent->get_node_meta();
- n_fixed_kv_extent->set_last_committed_crc(fixed_kv_extent.get_last_committed_crc());
-
- if (fixed_kv_extent.get_type() == internal_node_t::TYPE ||
- leaf_node_t::do_has_children) {
- if (!fixed_kv_extent.is_pending()) {
- n_fixed_kv_extent->copy_sources.emplace(&fixed_kv_extent);
- n_fixed_kv_extent->prior_instance = &fixed_kv_extent;
- } else {
- ceph_assert(fixed_kv_extent.is_mutation_pending());
- n_fixed_kv_extent->copy_sources.emplace(
- (typename internal_node_t::base_t*
- )fixed_kv_extent.get_prior_instance().get());
- n_fixed_kv_extent->children = std::move(fixed_kv_extent.children);
- n_fixed_kv_extent->prior_instance = fixed_kv_extent.get_prior_instance();
- n_fixed_kv_extent->adjust_ptracker_for_children();
- }
- }
-
- /* This is a bit underhanded. Any relative addrs here must necessarily
- * be record relative as we are rewriting a dirty extent. Thus, we
- * are using resolve_relative_addrs with a (likely negative) block
- * relative offset to correct them to block-relative offsets adjusted
- * for our new transaction location.
- *
- * Upon commit, these now block relative addresses will be interpretted
- * against the real final address.
- */
- if (!n_fixed_kv_extent->get_paddr().is_absolute()) {
- // backend_type_t::SEGMENTED
- assert(n_fixed_kv_extent->get_paddr().is_record_relative());
- n_fixed_kv_extent->resolve_relative_addrs(
- make_record_relative_paddr(0).block_relative_to(
- n_fixed_kv_extent->get_paddr()));
- } // else: backend_type_t::RANDOM_BLOCK
+ n_fixed_kv_extent->rewrite(fixed_kv_extent, 0);
SUBTRACET(
seastore_fixedkv_tree,
(get_node_size() - offset - 1) * sizeof(ChildableCachedExtent*));
}
+ // Whether this node type tracks in-memory child pointers:
+ // internal nodes always do; leaf nodes only when compiled with
+ // has_children (see do_has_children below).
+ virtual bool have_children() const = 0;
+
+ // FixedKVNode-specific rewrite hook, invoked by CachedExtent::rewrite()
+ // after the raw buffer, modify time and crc have been copied over.
+ // NOTE(review): off is asserted to be 0 -- fixed-kv nodes are rewritten
+ // whole, never in pieces.
+ void on_rewrite(CachedExtent &extent, extent_len_t off) final {
+ assert(get_type() == extent.get_type());
+ assert(off == 0);
+ auto &foreign_extent = (FixedKVNode&)extent;
+ range = get_node_meta();
+
+ if (have_children()) {
+ if (!foreign_extent.is_pending()) {
+ // rewriting a stable extent: it remains a copy source until commit
+ copy_sources.emplace(&foreign_extent);
+ } else {
+ ceph_assert(foreign_extent.is_mutation_pending());
+ // rewriting a mutation-pending extent: steal its children and use
+ // its stable prior instance as the copy source
+ copy_sources.emplace(
+ foreign_extent.get_prior_instance()->template cast<FixedKVNode>());
+ children = std::move(foreign_extent.children);
+ adjust_ptracker_for_children();
+ }
+ }
+
+ /* This is a bit underhanded. Any relative addrs here must necessarily
+ * be record relative as we are rewriting a dirty extent. Thus, we
+ * are using resolve_relative_addrs with a (likely negative) block
+ * relative offset to correct them to block-relative offsets adjusted
+ * for our new transaction location.
+ *
+ * Upon commit, these now block relative addresses will be interpreted
+ * against the real final address.
+ */
+ if (!get_paddr().is_absolute()) {
+ // backend_type_t::SEGMENTED
+ assert(get_paddr().is_record_relative());
+ resolve_relative_addrs(
+ make_record_relative_paddr(0).block_relative_to(get_paddr()));
+ } // else: backend_type_t::RANDOM_BLOCK
+ }
+
FixedKVNode& get_stable_for_key(node_key_t key) const {
ceph_assert(is_pending());
if (is_mutation_pending()) {
reset_parent_tracker();
}
- bool is_rewrite() {
- return is_initial_pending() && get_prior_instance();
- }
-
void on_initial_write() final {
// All in-memory relative addrs are necessarily block-relative
resolve_relative_addrs(get_paddr());
: FixedKVNode<NODE_KEY>(rhs),
node_layout_t(this->get_bptr().c_str()) {}
+ // internal nodes always maintain in-memory child pointers
+ bool have_children() const final {
+ return true;
+ }
+
bool is_leaf_and_has_children() const final {
return false;
}
static constexpr bool do_has_children = has_children;
+ // leaf nodes track children only when instantiated with has_children
+ bool have_children() const final {
+ return do_has_children;
+ }
+
bool is_leaf_and_has_children() const final {
return has_children;
}
i->on_initial_write();
i->state = CachedExtent::extent_state_t::CLEAN;
+ i->prior_instance.reset();
DEBUGT("add extent as fresh, inline={} -- {}",
t, is_inline, *i);
const auto t_src = t.get_src();
return true;
}
+ /**
+ * rewrite
+ *
+ * Copies the payload of extent e (from offset o) into this
+ * freshly-allocated (initial-pending) extent, links this extent to
+ * e's stable prior instance, carries over modify time and crc, then
+ * invokes the type-specific on_rewrite() hook.
+ */
+ void rewrite(CachedExtent &e, extent_len_t o) {
+ assert(is_initial_pending());
+ if (!e.is_pending()) {
+ // e is stable: it is itself the prior instance
+ prior_instance = &e;
+ } else {
+ assert(e.is_mutation_pending());
+ // e is mutation-pending: chain to its stable prior instance
+ prior_instance = e.get_prior_instance();
+ }
+ e.get_bptr().copy_out(
+ o,
+ get_length(),
+ get_bptr().c_str());
+ set_modify_time(e.get_modify_time());
+ set_last_committed_crc(e.get_last_committed_crc());
+ on_rewrite(e, o);
+ }
+
+ /**
+ * on_rewrite
+ *
+ * Called when this extent is rewriting another one, after the
+ * buffer/metadata copy in rewrite(); the offset is this extent's
+ * position within the source extent.
+ *
+ */
+ virtual void on_rewrite(CachedExtent &, extent_len_t) = 0;
+
friend std::ostream &operator<<(std::ostream &, extent_state_t);
virtual std::ostream &print_detail(std::ostream &out) const { return out; }
std::ostream &print(std::ostream &out) const {
return is_mutable() || state == extent_state_t::EXIST_CLEAN;
}
+ // True iff this fresh (initial-pending) extent was produced by
+ // rewriting an existing extent (rewrite() sets prior_instance).
+ bool is_rewrite() {
+ return is_initial_pending() && get_prior_instance();
+ }
+
/// Returns true if extent is stable, written and shared among transactions
bool is_stable_written() const {
return state == extent_state_t::CLEAN_PENDING ||
*/
virtual void update_in_extent_chksum_field(uint32_t) {}
+ /// Sets prior_instance (the extent this pending extent supersedes)
+ void set_prior_instance(CachedExtentRef p) {
+ prior_instance = p;
+ }
+
/// Sets last_committed_crc
void set_last_committed_crc(uint32_t crc) {
last_committed_crc = crc;
return false;
}
+ // no-op: a retired placeholder carries no rewrite-specific state
+ void on_rewrite(CachedExtent&, extent_len_t) final {}
+
std::ostream &print_detail(std::ostream &out) const final {
return out << ", RetiredExtentPlaceholder";
}
: ChildableCachedExtent(std::forward<T>(t)...)
{}
+ // On rewrite, inherit the source's laddr shifted by this extent's
+ // offset within the source (off > 0 when a large logical extent is
+ // rewritten in pieces).
+ void on_rewrite(CachedExtent &extent, extent_len_t off) final {
+ assert(get_type() == extent.get_type());
+ auto &lextent = (LogicalCachedExtent&)extent;
+ set_laddr(lextent.get_laddr() + off);
+ }
+
bool has_laddr() const {
return laddr != L_ADDR_NULL;
}
backref_root_node(nullptr)
{}
+ // no-op: RootBlock has no rewrite-specific state to carry over
+ void on_rewrite(CachedExtent&, extent_len_t) final {}
+
CachedExtentRef duplicate_for_write(Transaction&) final {
return CachedExtentRef(new RootBlock(*this));
};
lextent->get_user_hint(),
// get target rewrite generation
lextent->get_rewrite_generation())->cast<LogicalCachedExtent>();
- lextent->get_bptr().copy_out(
- 0,
- lextent->get_length(),
- nlextent->get_bptr().c_str());
- nlextent->set_laddr(lextent->get_laddr());
- nlextent->set_modify_time(lextent->get_modify_time());
+ nlextent->rewrite(*lextent, 0);
DEBUGT("rewriting logical extent -- {} to {}", t, *lextent, *nlextent);
bool first_extent = (off == 0);
ceph_assert(left >= nextent->get_length());
auto nlextent = nextent->template cast<LogicalCachedExtent>();
- lextent->get_bptr().copy_out(
- off,
- nlextent->get_length(),
- nlextent->get_bptr().c_str());
- nlextent->set_laddr(lextent->get_laddr() + off);
- nlextent->set_modify_time(lextent->get_modify_time());
- nlextent->set_last_committed_crc(lextent->get_last_committed_crc());
+ nlextent->rewrite(*lextent, off);
DEBUGT("rewriting logical extent -- {} to {}", t, *lextent, *nlextent);
/* This update_mapping is, strictly speaking, unnecessary for delayed_alloc
std::vector<test_block_delta_t> delta = {};
+ // no-op: test physical block needs no extra rewrite handling
+ void on_rewrite(CachedExtent&, extent_len_t) final {}
+
TestBlockPhysical(ceph::bufferptr &&ptr)
: CachedExtent(std::move(ptr)) {}
TestBlockPhysical(const TestBlockPhysical &other)