return paddr;
}
+ std::list<alloc_paddr_result> alloc_paddrs(extent_len_t length) {
+ // TODO: implement allocation strategy (dirty metadata and multiple devices)
+ auto rbs = rb_group->get_rb_managers();
+ auto ret = rbs[0]->alloc_extents(length);
+ stats.used_bytes += length;
+ return ret;
+ }
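With this change a single logical allocation may be satisfied by several physically discontiguous pieces. A minimal sketch of how a caller might consume the result, assuming only the {start, len} members of alloc_paddr_result that this patch uses elsewhere, and a hypothetical cleaner pointer (illustrative, not part of the patch):

    // Illustrative sketch: check that the returned pieces cover the
    // requested length; each piece is a physically contiguous range.
    extent_len_t covered = 0;
    for (const auto &r : cleaner->alloc_paddrs(length)) {
      covered += r.len;  // r.start is the paddr of this contiguous piece
    }
    ceph_assert(covered == length);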
+
size_t get_total_bytes() const {
auto rbs = rb_group->get_rb_managers();
size_t total = 0;
using mkfs_ret = phy_tree_root_t;
static mkfs_ret mkfs(RootBlockRef &root_block, op_context_t<node_key_t> c) {
assert(root_block->is_mutation_pending());
- auto root_leaf = c.cache.template alloc_new_extent<leaf_node_t>(
+ auto root_leaf = c.cache.template alloc_new_non_data_extent<leaf_node_t>(
c.trans,
node_size,
placement_hint_t::HOT,
assert(is_lba_backref_node(e->get_type()));
auto do_rewrite = [&](auto &fixed_kv_extent) {
- auto n_fixed_kv_extent = c.cache.template alloc_new_extent<
+ auto n_fixed_kv_extent = c.cache.template alloc_new_non_data_extent<
std::remove_reference_t<decltype(fixed_kv_extent)>
>(
c.trans,
SUBTRACET(seastore_fixedkv_tree, "split_from {}, depth {}", c.trans, split_from, iter.get_depth());
if (split_from == iter.get_depth()) {
- auto nroot = c.cache.template alloc_new_extent<internal_node_t>(
+ auto nroot = c.cache.template alloc_new_non_data_extent<internal_node_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
fixed_kv_node_meta_t<node_key_t> meta{
min_max_t<node_key_t>::min, min_max_t<node_key_t>::max, iter.get_depth() + 1};
std::tuple<Ref, Ref, NODE_KEY>
make_split_children(op_context_t<NODE_KEY> c) {
- auto left = c.cache.template alloc_new_extent<node_type_t>(
+ auto left = c.cache.template alloc_new_non_data_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
- auto right = c.cache.template alloc_new_extent<node_type_t>(
+ auto right = c.cache.template alloc_new_non_data_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
this->split_child_ptrs(*left, *right);
auto pivot = this->split_into(*left, *right);
Ref make_full_merge(
op_context_t<NODE_KEY> c,
Ref &right) {
- auto replacement = c.cache.template alloc_new_extent<node_type_t>(
+ auto replacement = c.cache.template alloc_new_non_data_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
replacement->merge_child_ptrs(*this, *right);
replacement->merge_from(*this, *right->template cast<node_type_t>());
bool prefer_left) {
ceph_assert(_right->get_type() == this->get_type());
auto &right = *_right->template cast<node_type_t>();
- auto replacement_left = c.cache.template alloc_new_extent<node_type_t>(
+ auto replacement_left = c.cache.template alloc_new_non_data_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
- auto replacement_right = c.cache.template alloc_new_extent<node_type_t>(
+ auto replacement_right = c.cache.template alloc_new_non_data_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
auto pivot = this->balance_into_new_nodes(
std::tuple<Ref, Ref, NODE_KEY>
make_split_children(op_context_t<NODE_KEY> c) {
- auto left = c.cache.template alloc_new_extent<node_type_t>(
+ auto left = c.cache.template alloc_new_non_data_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
- auto right = c.cache.template alloc_new_extent<node_type_t>(
+ auto right = c.cache.template alloc_new_non_data_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
if constexpr (has_children) {
this->split_child_ptrs(*left, *right);
Ref make_full_merge(
op_context_t<NODE_KEY> c,
Ref &right) {
- auto replacement = c.cache.template alloc_new_extent<node_type_t>(
+ auto replacement = c.cache.template alloc_new_non_data_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
if constexpr (has_children) {
replacement->merge_child_ptrs(*this, *right);
bool prefer_left) {
ceph_assert(_right->get_type() == this->get_type());
auto &right = *_right->template cast<node_type_t>();
- auto replacement_left = c.cache.template alloc_new_extent<node_type_t>(
+ auto replacement_left = c.cache.template alloc_new_non_data_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
- auto replacement_right = c.cache.template alloc_new_extent<node_type_t>(
+ auto replacement_right = c.cache.template alloc_new_non_data_extent<node_type_t>(
c.trans, node_size, placement_hint_t::HOT, INIT_GENERATION);
auto pivot = this->balance_into_new_nodes(
LOG_PREFIX(Cache::alloc_new_extent_by_type);
SUBDEBUGT(seastore_cache, "allocate {} {}B, hint={}, gen={}",
t, type, length, hint, rewrite_gen_printer_t{gen});
+ ceph_assert(get_extent_category(type) == data_category_t::METADATA);
switch (type) {
case extent_types_t::ROOT:
ceph_assert(0 == "ROOT is never directly alloc'd");
return CachedExtentRef();
case extent_types_t::LADDR_INTERNAL:
- return alloc_new_extent<lba_manager::btree::LBAInternalNode>(t, length, hint, gen);
+ return alloc_new_non_data_extent<lba_manager::btree::LBAInternalNode>(t, length, hint, gen);
case extent_types_t::LADDR_LEAF:
- return alloc_new_extent<lba_manager::btree::LBALeafNode>(
+ return alloc_new_non_data_extent<lba_manager::btree::LBALeafNode>(
t, length, hint, gen);
case extent_types_t::ONODE_BLOCK_STAGED:
- return alloc_new_extent<onode::SeastoreNodeExtent>(t, length, hint, gen);
+ return alloc_new_non_data_extent<onode::SeastoreNodeExtent>(
+ t, length, hint, gen);
case extent_types_t::OMAP_INNER:
- return alloc_new_extent<omap_manager::OMapInnerNode>(t, length, hint, gen);
+ return alloc_new_non_data_extent<omap_manager::OMapInnerNode>(
+ t, length, hint, gen);
case extent_types_t::OMAP_LEAF:
- return alloc_new_extent<omap_manager::OMapLeafNode>(t, length, hint, gen);
+ return alloc_new_non_data_extent<omap_manager::OMapLeafNode>(
+ t, length, hint, gen);
case extent_types_t::COLL_BLOCK:
- return alloc_new_extent<collection_manager::CollectionNode>(t, length, hint, gen);
- case extent_types_t::OBJECT_DATA_BLOCK:
- return alloc_new_extent<ObjectDataBlock>(t, length, hint, gen);
+ return alloc_new_non_data_extent<collection_manager::CollectionNode>(
+ t, length, hint, gen);
case extent_types_t::RETIRED_PLACEHOLDER:
ceph_assert(0 == "impossible");
return CachedExtentRef();
- case extent_types_t::TEST_BLOCK:
- return alloc_new_extent<TestBlock>(t, length, hint, gen);
case extent_types_t::TEST_BLOCK_PHYSICAL:
- return alloc_new_extent<TestBlockPhysical>(t, length, hint, gen);
+ return alloc_new_non_data_extent<TestBlockPhysical>(t, length, hint, gen);
case extent_types_t::NONE: {
ceph_assert(0 == "NONE is an invalid extent type");
return CachedExtentRef();
}
}
+std::vector<CachedExtentRef> Cache::alloc_new_data_extents_by_type(
+ Transaction &t, ///< [in, out] current transaction
+ extent_types_t type, ///< [in] type tag
+ extent_len_t length, ///< [in] length
+ placement_hint_t hint, ///< [in] user hint
+ rewrite_gen_t gen ///< [in] rewrite generation
+)
+{
+ LOG_PREFIX(Cache::alloc_new_data_extents_by_type);
+ SUBDEBUGT(seastore_cache, "allocate {} {}B, hint={}, gen={}",
+ t, type, length, hint, rewrite_gen_printer_t{gen});
+ ceph_assert(get_extent_category(type) == data_category_t::DATA);
+ std::vector<CachedExtentRef> res;
+ switch (type) {
+ case extent_types_t::OBJECT_DATA_BLOCK:
+ {
+ auto extents = alloc_new_data_extents<ObjectDataBlock>(t, length, hint, gen);
+ res.insert(res.begin(), extents.begin(), extents.end());
+ }
+ return res;
+ case extent_types_t::TEST_BLOCK:
+ {
+ auto extents = alloc_new_data_extents<TestBlock>(t, length, hint, gen);
+ res.insert(res.begin(), extents.begin(), extents.end());
+ }
+ return res;
+ default:
+ ceph_assert(0 == "impossible");
+ return res;
+ }
+}
+
CachedExtentRef Cache::duplicate_for_write(
Transaction &t,
CachedExtentRef i) {
}
/**
- * alloc_new_extent
+ * alloc_new_non_data_extent
*
* Allocates a fresh extent. if delayed is true, addr will be alloc'd later.
* Note that epaddr can only be fed by the btree lba unittest for now
*/
template <typename T>
- TCachedExtentRef<T> alloc_new_extent(
+ TCachedExtentRef<T> alloc_new_non_data_extent(
Transaction &t, ///< [in, out] current transaction
extent_len_t length, ///< [in] length
placement_hint_t hint, ///< [in] user hint
rewrite_gen_t gen
#endif
) {
- LOG_PREFIX(Cache::alloc_new_extent);
+ LOG_PREFIX(Cache::alloc_new_non_data_extent);
SUBTRACET(seastore_cache, "allocate {} {}B, hint={}, gen={}",
t, T::TYPE, length, hint, rewrite_gen_printer_t{gen});
#ifdef UNIT_TESTS_BUILT
- auto result = epm.alloc_new_extent(t, T::TYPE, length, hint, gen, epaddr);
+ auto result = epm.alloc_new_non_data_extent(t, T::TYPE, length, hint, gen, epaddr);
#else
- auto result = epm.alloc_new_extent(t, T::TYPE, length, hint, gen);
+ auto result = epm.alloc_new_non_data_extent(t, T::TYPE, length, hint, gen);
#endif
auto ret = CachedExtent::make_cached_extent_ref<T>(std::move(result.bp));
ret->init(CachedExtent::extent_state_t::INITIAL_WRITE_PENDING,
  result.paddr, hint, result.gen, t.get_trans_id());
return ret;
}
+ /**
+ * alloc_new_data_extents
+ *
+  * Allocates fresh data extents. If delayed is true, addrs will be alloc'd later.
+ * Note that epaddr can only be fed by the btree lba unittest for now
+ */
+ template <typename T>
+ std::vector<TCachedExtentRef<T>> alloc_new_data_extents(
+ Transaction &t, ///< [in, out] current transaction
+ extent_len_t length, ///< [in] length
+ placement_hint_t hint, ///< [in] user hint
+#ifdef UNIT_TESTS_BUILT
+ rewrite_gen_t gen, ///< [in] rewrite generation
+ std::optional<paddr_t> epaddr = std::nullopt ///< [in] paddr fed by callers
+#else
+ rewrite_gen_t gen
+#endif
+ ) {
+ LOG_PREFIX(Cache::alloc_new_data_extents);
+ SUBTRACET(seastore_cache, "allocate {} {}B, hint={}, gen={}",
+ t, T::TYPE, length, hint, rewrite_gen_printer_t{gen});
+#ifdef UNIT_TESTS_BUILT
+ auto results = epm.alloc_new_data_extents(t, T::TYPE, length, hint, gen, epaddr);
+#else
+ auto results = epm.alloc_new_data_extents(t, T::TYPE, length, hint, gen);
+#endif
+ std::vector<TCachedExtentRef<T>> extents;
+ for (auto &result : results) {
+ auto ret = CachedExtent::make_cached_extent_ref<T>(std::move(result.bp));
+ ret->init(CachedExtent::extent_state_t::INITIAL_WRITE_PENDING,
+ result.paddr,
+ hint,
+ result.gen,
+ t.get_trans_id());
+ t.add_fresh_extent(ret);
+ SUBDEBUGT(seastore_cache,
+ "allocated {} {}B extent at {}, hint={}, gen={} -- {}",
+ t, T::TYPE, length, result.paddr,
+ hint, rewrite_gen_printer_t{result.gen}, *ret);
+ extents.emplace_back(std::move(ret));
+ }
+ return extents;
+ }
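Because the backend writer may return more than one result per request, callers of alloc_new_data_extents cannot assume a single extent; the intent is that the returned extents' lengths sum to the requested length. A hedged sketch of the calling pattern (hypothetical caller code, for illustration only, not part of the patch):

    // Illustrative sketch: allocate data extents and check total coverage.
    auto extents = cache->alloc_new_data_extents<ObjectDataBlock>(
        t, length, placement_hint_t::HOT, INIT_GENERATION);
    extent_len_t total = 0;
    for (auto &ext : extents) {
      total += ext->get_length();
    }
    ceph_assert(total == length);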
/**
* alloc_remapped_extent
rewrite_gen_t gen ///< [in] rewrite generation
);
+ /**
+  * alloc_new_data_extents_by_type
+  *
+  * Allocates fresh data extents. addrs will be relative until commit.
+ */
+ std::vector<CachedExtentRef> alloc_new_data_extents_by_type(
+ Transaction &t, ///< [in, out] current transaction
+ extent_types_t type, ///< [in] type tag
+ extent_len_t length, ///< [in] length
+ placement_hint_t hint, ///< [in] user hint
+ rewrite_gen_t gen ///< [in] rewrite generation
+ );
+
/**
* Allocates mutable buffer from extent_set on offset~len
*
{
logger().debug("FlatCollectionManager: {}", __func__);
- return tm.alloc_extent<CollectionNode>(
+ return tm.alloc_non_data_extent<CollectionNode>(
t, L_ADDR_MIN, MIN_FLAT_BLOCK_SIZE
).si_then([](auto&& root_extent) {
coll_root_t coll_root = coll_root_t(
// TODO return error probably, but such a nonsensically large number of
// collections would create a ton of other problems as well
assert(new_size < MAX_FLAT_BLOCK_SIZE);
- return tm.alloc_extent<CollectionNode>(
+ return tm.alloc_non_data_extent<CollectionNode>(
t, L_ADDR_MIN, new_size
).si_then([=, this, &coll_root, &t] (auto &&root_extent) {
coll_root.update(root_extent->get_laddr(), root_extent->get_length());
virtual paddr_t alloc_paddr(extent_len_t length) = 0;
+ virtual std::list<alloc_paddr_result> alloc_paddrs(extent_len_t length) = 0;
+
using alloc_write_ertr = base_ertr;
using alloc_write_iertr = trans_iertr<alloc_write_ertr>;
virtual alloc_write_iertr::future<> alloc_write_ool_extents(
return make_delayed_temp_paddr(0);
}
+ std::list<alloc_paddr_result> alloc_paddrs(extent_len_t length) final {
+ return {alloc_paddr_result{make_delayed_temp_paddr(0), length}};
+ }
+
bool can_inplace_rewrite(Transaction& t,
CachedExtentRef extent) final {
return false;
return rb_cleaner->alloc_paddr(length);
}
+ std::list<alloc_paddr_result> alloc_paddrs(extent_len_t length) final {
+ assert(rb_cleaner);
+ return rb_cleaner->alloc_paddrs(length);
+ }
+
bool can_inplace_rewrite(Transaction& t,
CachedExtentRef extent) final {
if (!extent->is_dirty()) {
bufferptr bp;
rewrite_gen_t gen;
};
- alloc_result_t alloc_new_extent(
+ alloc_result_t alloc_new_non_data_extent(
Transaction& t,
extent_types_t type,
extent_len_t length,
if (gen == INLINE_GENERATION) {
#endif
addr = make_record_relative_paddr(0);
- } else if (category == data_category_t::DATA) {
- assert(data_writers_by_gen[generation_to_writer(gen)]);
- addr = data_writers_by_gen[
- generation_to_writer(gen)]->alloc_paddr(length);
} else {
assert(category == data_category_t::METADATA);
assert(md_writers_by_gen[generation_to_writer(gen)]);
addr = md_writers_by_gen[
generation_to_writer(gen)]->alloc_paddr(length);
}
+ assert(category != data_category_t::DATA);
return {addr, std::move(bp), gen};
}
+ std::list<alloc_result_t> alloc_new_data_extents(
+ Transaction& t,
+ extent_types_t type,
+ extent_len_t length,
+ placement_hint_t hint,
+#ifdef UNIT_TESTS_BUILT
+ rewrite_gen_t gen,
+ std::optional<paddr_t> external_paddr = std::nullopt
+#else
+ rewrite_gen_t gen
+#endif
+ ) {
+ assert(hint < placement_hint_t::NUM_HINTS);
+ assert(is_target_rewrite_generation(gen));
+ assert(gen == INIT_GENERATION || hint == placement_hint_t::REWRITE);
+
+ data_category_t category = get_extent_category(type);
+ gen = adjust_generation(category, type, hint, gen);
+ assert(gen != INLINE_GENERATION);
+
+ // XXX: bp might be extended to point to different memory (e.g. PMem)
+ // according to the allocator.
+ std::list<alloc_result_t> allocs;
+#ifdef UNIT_TESTS_BUILT
+ if (unlikely(external_paddr.has_value())) {
+ assert(external_paddr->is_fake());
+ auto bp = ceph::bufferptr(
+ buffer::create_page_aligned(length));
+ bp.zero();
+ allocs.emplace_back(alloc_result_t{*external_paddr, std::move(bp), gen});
+ } else {
+#else
+ {
+#endif
+ assert(category == data_category_t::DATA);
+ assert(data_writers_by_gen[generation_to_writer(gen)]);
+ auto addrs = data_writers_by_gen[
+ generation_to_writer(gen)]->alloc_paddrs(length);
+ for (auto &ext : addrs) {
+ auto bp = ceph::bufferptr(
+ buffer::create_page_aligned(ext.len));
+ bp.zero();
+ allocs.emplace_back(alloc_result_t{ext.start, std::move(bp), gen});
+ }
+ }
+ return allocs;
+ }
+
/**
* dispatch_result_t
*
{
LOG_PREFIX(BtreeOMapManager::initialize_omap);
DEBUGT("hint: {}", t, hint);
- return tm.alloc_extent<OMapLeafNode>(t, hint, OMAP_LEAF_BLOCK_SIZE)
+ return tm.alloc_non_data_extent<OMapLeafNode>(t, hint, OMAP_LEAF_BLOCK_SIZE)
.si_then([hint, &t](auto&& root_extent) {
root_extent->set_size(0);
omap_node_meta_t meta{1};
{
LOG_PREFIX(BtreeOMapManager::handle_root_split);
DEBUGT("{}", oc.t, omap_root);
- return oc.tm.alloc_extent<OMapInnerNode>(oc.t, omap_root.hint,
+ return oc.tm.alloc_non_data_extent<OMapInnerNode>(oc.t, omap_root.hint,
OMAP_INNER_BLOCK_SIZE)
.si_then([&omap_root, mresult, oc](auto&& nroot) -> handle_root_split_ret {
auto [left, right, pivot] = *(mresult.split_tuple);
{
LOG_PREFIX(OMapInnerNode::make_full_merge);
DEBUGT("", oc.t);
- return oc.tm.alloc_extent<OMapInnerNode>(oc.t, oc.hint,
+ return oc.tm.alloc_non_data_extent<OMapInnerNode>(oc.t, oc.hint,
OMAP_INNER_BLOCK_SIZE)
.si_then([this, right] (auto &&replacement) {
replacement->merge_from(*this, *right->cast<OMapInnerNode>());
ceph_assert(right->get_type() == TYPE);
LOG_PREFIX(OMapLeafNode::make_full_merge);
DEBUGT("this: {}", oc.t, *this);
- return oc.tm.alloc_extent<OMapLeafNode>(oc.t, oc.hint, OMAP_LEAF_BLOCK_SIZE)
+ return oc.tm.alloc_non_data_extent<OMapLeafNode>(oc.t, oc.hint, OMAP_LEAF_BLOCK_SIZE)
.si_then([this, right] (auto &&replacement) {
replacement->merge_from(*this, *right->cast<OMapLeafNode>());
return full_merge_ret(
return alloc_iertr::make_ready_future<NodeExtentRef>();
}
}
- return tm.alloc_extent<SeastoreNodeExtent>(t, hint, len
+ return tm.alloc_non_data_extent<SeastoreNodeExtent>(t, hint, len
).si_then([len, &t](auto extent) {
SUBDEBUGT(seastore_onode,
"allocated {}B at {:#x} -- {}",
std::vector<laddr_t> offsets);
/**
- * alloc_extent
+ * alloc_non_data_extent
*
* Allocates a new block of type T with the minimum lba range of size len
* greater than laddr_hint.
template <typename T>
using alloc_extent_ret = alloc_extent_iertr::future<TCachedExtentRef<T>>;
template <typename T>
- alloc_extent_ret<T> alloc_extent(
+ alloc_extent_ret<T> alloc_non_data_extent(
Transaction &t,
laddr_t laddr_hint,
extent_len_t len,
placement_hint_t placement_hint = placement_hint_t::HOT) {
- LOG_PREFIX(TransactionManager::alloc_extent);
+ LOG_PREFIX(TransactionManager::alloc_non_data_extent);
SUBTRACET(seastore_tm, "{} len={}, placement_hint={}, laddr_hint={}",
t, T::TYPE, len, placement_hint, laddr_hint);
ceph_assert(is_aligned(laddr_hint, epm->get_block_size()));
- auto ext = cache->alloc_new_extent<T>(
+ auto ext = cache->alloc_new_non_data_extent<T>(
t,
len,
placement_hint,
ext->get_paddr(),
*ext
).si_then([ext=std::move(ext), laddr_hint, &t](auto &&) mutable {
- LOG_PREFIX(TransactionManager::alloc_extent);
+ LOG_PREFIX(TransactionManager::alloc_non_data_extent);
SUBDEBUGT(seastore_tm, "new extent: {}, laddr_hint: {}", t, *ext, laddr_hint);
return alloc_extent_iertr::make_ready_future<TCachedExtentRef<T>>(
std::move(ext));
});
}
+ /**
+ * alloc_data_extents
+ *
+  * Allocates new data blocks of type T with a total length of len, mapped
+  * to lba ranges at the minimum laddrs greater than laddr_hint.
+ */
+ using alloc_extents_iertr = alloc_extent_iertr;
+ template <typename T>
+ using alloc_extents_ret = alloc_extents_iertr::future<
+ std::vector<TCachedExtentRef<T>>>;
+ template <typename T>
+ alloc_extents_ret<T> alloc_data_extents(
+ Transaction &t,
+ laddr_t laddr_hint,
+ extent_len_t len,
+ placement_hint_t placement_hint = placement_hint_t::HOT) {
+ LOG_PREFIX(TransactionManager::alloc_data_extents);
+ SUBTRACET(seastore_tm, "{} len={}, placement_hint={}, laddr_hint={}",
+ t, T::TYPE, len, placement_hint, laddr_hint);
+ ceph_assert(is_aligned(laddr_hint, epm->get_block_size()));
+ auto exts = cache->alloc_new_data_extents<T>(
+ t,
+ len,
+ placement_hint,
+ INIT_GENERATION);
+ return seastar::do_with(
+ std::move(exts),
+ laddr_hint,
+ [this, &t](auto &exts, auto &laddr_hint) {
+ return trans_intr::do_for_each(
+ exts,
+ [this, &t, &laddr_hint](auto &ext) {
+ return lba_manager->alloc_extent(
+ t,
+ laddr_hint,
+ ext->get_length(),
+ ext->get_paddr(),
+ *ext
+ ).si_then([&ext, &laddr_hint, &t](auto &&) mutable {
+ LOG_PREFIX(TransactionManager::alloc_data_extents);
+ SUBDEBUGT(seastore_tm, "new extent: {}, laddr_hint: {}", t, *ext, laddr_hint);
+ laddr_hint += ext->get_length();
+ return alloc_extent_iertr::now();
+ });
+ }).si_then([&exts] {
+ return alloc_extent_iertr::make_ready_future<
+ std::vector<TCachedExtentRef<T>>>(std::move(exts));
+ });
+ });
+ }
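Callers that previously filled a single returned extent now need to spread their source buffer across the returned vector, advancing the source offset by each extent's length. A minimal sketch mirroring the test updates below (hypothetical caller code with an `extents` vector and a source bufferptr `ptr`, not part of the patch):

    // Illustrative sketch: split a source bufferptr across the extents
    // returned by alloc_data_extents.
    extent_len_t copied = 0;
    for (auto &ext : extents) {
      ptr.copy_out(copied, ext->get_length(), ext->get_bptr().c_str());
      copied += ext->get_length();
    }
    ceph_assert(copied == ptr.length());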
+
template <typename T>
read_extent_ret<T> get_mutable_extent_by_laddr(Transaction &t, laddr_t laddr, extent_len_t len) {
return get_pin(t, laddr
*
* allocates more than one new blocks of type T.
*/
- using alloc_extents_iertr = alloc_extent_iertr;
template<class T>
alloc_extents_iertr::future<std::vector<TCachedExtentRef<T>>>
alloc_extents(
boost::make_counting_iterator(0),
boost::make_counting_iterator(num),
[this, &t, len, hint, &extents] (auto i) {
- return alloc_extent<T>(t, hint, len).si_then(
+ return alloc_non_data_extent<T>(t, hint, len).si_then(
[&extents](auto &&node) {
extents.push_back(node);
});
crimson::ct_error::pass_further_all{}
).si_then([this, offset, &t, &ptr] {
logger().debug("dec_ref complete");
- return tm->alloc_extent<TestBlock>(t, offset, ptr.length());
- }).si_then([this, offset, &t, &ptr](auto ext) {
- boost::ignore_unused(offset); // avoid clang warning;
- assert(ext->get_laddr() == (size_t)offset);
- assert(ext->get_bptr().length() == ptr.length());
- ext->get_bptr().swap(ptr);
+ return tm->alloc_data_extents<TestBlock>(t, offset, ptr.length());
+ }).si_then([this, offset, &t, &ptr](auto extents) mutable {
+ boost::ignore_unused(offset); // avoid clang warning;
+ auto off = offset;
+ auto left = ptr.length();
+ size_t written = 0;
+ for (auto &ext : extents) {
+ assert(ext->get_laddr() == (size_t)off);
+ assert(ext->get_bptr().length() <= left);
+ ptr.copy_out(written, ext->get_length(), ext->get_bptr().c_str());
+ written += ext->get_length();
+ off += ext->get_length();
+ left -= ext->get_length();
+ }
+ assert(!left);
logger().debug("submitting transaction");
return tm->submit_transaction(t);
});
ceph_assert(check.count(addr) == 0);
check.emplace(addr, get_map_val(len));
lba_btree_update([=, this](auto &btree, auto &t) {
- auto extent = cache->alloc_new_extent<TestBlock>(
+ auto extents = cache->alloc_new_data_extents<TestBlock>(
t,
TestBlock::SIZE,
placement_hint_t::HOT,
0,
get_paddr());
+ assert(extents.size() == 1);
+ auto extent = extents.front();
return btree.insert(
get_op_context(t), addr, get_map_val(len), extent.get()
).si_then([addr, extent](auto p){
test_lba_mappings
};
if (create_fake_extent) {
- cache->alloc_new_extent<TestBlockPhysical>(
+ cache->alloc_new_non_data_extent<TestBlockPhysical>(
*t.t,
TestBlockPhysical::SIZE,
placement_hint_t::HOT,
auto ret = with_trans_intr(
*t.t,
[=, this](auto &t) {
- auto extent = cache->alloc_new_extent<TestBlock>(
+ auto extents = cache->alloc_new_data_extents<TestBlock>(
t,
TestBlock::SIZE,
placement_hint_t::HOT,
0,
get_paddr());
+ assert(extents.size() == 1);
+ auto extent = extents.front();
return lba_manager->alloc_extent(
t, hint, len, extent->get_paddr(), *extent);
}).unsafe_get0();
int csum = 0;
{
auto t = get_transaction();
- auto extent = cache->alloc_new_extent<TestBlockPhysical>(
+ auto extent = cache->alloc_new_non_data_extent<TestBlockPhysical>(
*t,
TestBlockPhysical::SIZE,
placement_hint_t::HOT,
{
// write out initial test block
auto t = get_transaction();
- auto extent = cache->alloc_new_extent<TestBlockPhysical>(
+ auto extent = cache->alloc_new_non_data_extent<TestBlockPhysical>(
*t,
TestBlockPhysical::SIZE,
placement_hint_t::HOT,
laddr_t hint,
extent_len_t len,
char contents) {
- auto extent = with_trans_intr(*(t.t), [&](auto& trans) {
- return tm->alloc_extent<TestBlock>(trans, hint, len);
+ auto extents = with_trans_intr(*(t.t), [&](auto& trans) {
+ return tm->alloc_data_extents<TestBlock>(trans, hint, len);
}).unsafe_get0();
+ assert(extents.size() == 1);
+ auto extent = extents.front();
extent->set_contents(contents);
EXPECT_FALSE(test_mappings.contains(extent->get_laddr(), t.mapping_delta));
EXPECT_EQ(len, extent->get_length());
boost::make_counting_iterator(0),
boost::make_counting_iterator(num),
[&t, this, size](auto) {
- return tm->alloc_extent<TestBlock>(
+ return tm->alloc_data_extents<TestBlock>(
*(t.t), L_ADDR_MIN, size
- ).si_then([&t, this, size](auto extent) {
+ ).si_then([&t, this, size](auto extents) {
+ assert(extents.size() == 1);
+ auto extent = extents.front();
extent->set_contents(get_random_contents());
EXPECT_FALSE(
test_mappings.contains(extent->get_laddr(), t.mapping_delta));
o_len - new_offset - new_len)
}
).si_then([this, new_offset, new_len, o_laddr, &t, &bl](auto ret) {
- return tm->alloc_extent<TestBlock>(t, o_laddr + new_offset, new_len
+ return tm->alloc_data_extents<TestBlock>(t, o_laddr + new_offset, new_len
).si_then([this, ret = std::move(ret), new_len,
- new_offset, o_laddr, &t, &bl](auto ext) mutable {
+ new_offset, o_laddr, &t, &bl](auto extents) mutable {
+ assert(extents.size() == 1);
+ auto ext = extents.front();
ceph_assert(ret.size() == 2);
auto iter = bl.cbegin();
iter.copy(new_len, ext->get_bptr().c_str());
o_len - new_offset - new_len)
}
).si_then([this, new_offset, new_len, o_laddr, &t, &bl](auto ret) {
- return tm->alloc_extent<TestBlock>(t, o_laddr + new_offset, new_len
+ return tm->alloc_data_extents<TestBlock>(t, o_laddr + new_offset, new_len
).si_then([this, ret = std::move(ret), new_offset, new_len,
- o_laddr, &t, &bl](auto ext) mutable {
+ o_laddr, &t, &bl](auto extents) mutable {
+ assert(extents.size() == 1);
+ auto ext = extents.front();
ceph_assert(ret.size() == 1);
auto iter = bl.cbegin();
iter.copy(new_len, ext->get_bptr().c_str());
new_offset)
}
).si_then([this, new_offset, new_len, o_laddr, &t, &bl](auto ret) {
- return tm->alloc_extent<TestBlock>(t, o_laddr + new_offset, new_len
+ return tm->alloc_data_extents<TestBlock>(t, o_laddr + new_offset, new_len
).si_then([this, ret = std::move(ret), new_len, o_laddr, &t, &bl]
- (auto ext) mutable {
+ (auto extents) mutable {
+ assert(extents.size() == 1);
+ auto ext = extents.front();
ceph_assert(ret.size() == 1);
auto iter = bl.cbegin();
iter.copy(new_len, ext->get_bptr().c_str());