}
}
- for (auto &i: t.ool_block_list) {
+ for (auto &i: t.written_ool_block_list) {
TRACET("fresh ool extent -- {}", t, *i);
ceph_assert(i->is_valid());
assert(!i->is_inline());
ceph_assert(t.get_fresh_block_stats().num ==
t.inline_block_list.size() +
- t.ool_block_list.size() +
- t.num_delayed_invalid_extents);
+ t.written_ool_block_list.size() +
+ t.num_delayed_invalid_extents +
+ t.num_allocated_invalid_extents);
auto& ool_stats = t.get_ool_write_stats();
- ceph_assert(ool_stats.extents.num == t.ool_block_list.size());
+ ceph_assert(ool_stats.extents.num == t.written_ool_block_list.size());
if (record.is_empty()) {
SUBINFOT(seastore_t,
}
ExtentPlacementManager::alloc_paddr_iertr::future<>
-ExtentPlacementManager::delayed_alloc_or_ool_write(
+ExtentPlacementManager::delayed_allocate_and_write(
Transaction &t,
const std::list<LogicalCachedExtentRef> &delayed_extents)
{
- LOG_PREFIX(ExtentPlacementManager::delayed_alloc_or_ool_write);
+ LOG_PREFIX(ExtentPlacementManager::delayed_allocate_and_write);
DEBUGT("start with {} delayed extents",
t, delayed_extents.size());
assert(writer_refs.size());
});
}
+ExtentPlacementManager::alloc_paddr_iertr::future<>
+ExtentPlacementManager::write_preallocated_ool_extents(
+  Transaction &t,
+  std::list<LogicalCachedExtentRef> extents)
+{
+  LOG_PREFIX(ExtentPlacementManager::write_preallocated_ool_extents);
+  DEBUGT("start with {} allocated extents",
+         t, extents.size());
+  assert(writer_refs.size());
+  // Group the extents by their designated ool writer, then issue the
+  // writes per writer.  The grouping map must stay alive until all the
+  // asynchronous writes complete, hence seastar::do_with.
+  return seastar::do_with(
+      std::map<ExtentOolWriter*, std::list<LogicalCachedExtentRef>>(),
+      [this, &t, extents=std::move(extents)](auto& alloc_map) {
+    for (auto& extent : extents) {
+      auto writer_ptr = get_writer(
+          extent->get_user_hint(),
+          get_extent_category(extent->get_type()),
+          extent->get_rewrite_generation());
+      // extents is owned by this lambda and consumed here, so move the
+      // ref into the per-writer list instead of bumping the refcount.
+      alloc_map[writer_ptr].emplace_back(std::move(extent));
+    }
+    return trans_intr::do_for_each(alloc_map, [&t](auto& p) {
+      auto writer = p.first;
+      auto& extents = p.second;
+      return writer->alloc_write_ool_extents(t, extents);
+    });
+  });
+}
+
ExtentPlacementManager::close_ertr::future<>
ExtentPlacementManager::close()
{
).safe_then([&t, &ex, paddr, FNAME]() {
TRACET("ool extent written at {} -- {}",
t, paddr, *ex);
- t.mark_extent_ool(ex, paddr);
+ t.mark_allocated_extent_ool(ex);
return alloc_write_iertr::now();
});
});
}
/**
- * delayed_alloc_or_ool_write
+ * delayed_allocate_and_write
*
* Performs delayed allocation and do writes for out-of-line extents.
*/
using alloc_paddr_iertr = ExtentOolWriter::alloc_write_iertr;
- alloc_paddr_iertr::future<> delayed_alloc_or_ool_write(
+ alloc_paddr_iertr::future<> delayed_allocate_and_write(
Transaction& t,
const std::list<LogicalCachedExtentRef>& delayed_extents);
+ /**
+ * write_preallocated_ool_extents
+ *
+ * Performs ool writes for extents with pre-allocated addresses.
+ * See Transaction::pre_alloc_list
+ */
+ alloc_paddr_iertr::future<> write_preallocated_ool_extents(
+ Transaction &t,
+ std::list<LogicalCachedExtentRef> extents);
+
seastar::future<> stop_background() {
return background_process.stop_background();
}
fresh_block_stats.increment(ref->get_length());
} else if (ref->get_paddr().is_absolute()) {
assert(ref->is_logical());
- delayed_alloc_list.emplace_back(ref->cast<LogicalCachedExtent>());
+ pre_alloc_list.emplace_back(ref->cast<LogicalCachedExtent>());
fresh_block_stats.increment(ref->get_length());
} else {
assert(ref->get_paddr() == make_record_relative_paddr(0));
ref->set_paddr(final_addr);
assert(!ref->get_paddr().is_null());
assert(!ref->is_inline());
- ool_block_list.push_back(ref);
+ written_ool_block_list.push_back(ref);
write_set.insert(*ref);
}
-  void mark_extent_ool(LogicalCachedExtentRef& ref, paddr_t final_addr) {
-    mark_delayed_extent_ool(ref, final_addr);
+  // Record a pre-allocated extent as written out-of-line.  Its absolute
+  // paddr was assigned before the write (see the asserts), so unlike the
+  // delayed path no address needs to be set here -- only bookkeeping.
+  void mark_allocated_extent_ool(LogicalCachedExtentRef& ref) {
+    assert(ref->get_paddr().is_absolute());
+    assert(!ref->is_inline());
+    written_ool_block_list.push_back(ref);
   }
void add_mutated_extent(CachedExtentRef ref) {
return ret;
}
+  // Collect the still-valid extents from pre_alloc_list for the ool
+  // write phase, counting the invalidated ones into
+  // num_allocated_invalid_extents so fresh-block accounting balances.
+  // The leading assert implies this is called at most once per
+  // transaction cycle (the counter is reset on transaction reset).
+  auto get_valid_pre_alloc_list() {
+    std::list<LogicalCachedExtentRef> ret;
+    assert(num_allocated_invalid_extents == 0);
+    for (auto& extent : pre_alloc_list) {
+      if (extent->is_valid()) {
+        ret.push_back(extent);
+      } else {
+        ++num_allocated_invalid_extents;
+      }
+    }
+    return ret;
+  }
+
const auto &get_mutated_block_list() {
return mutated_block_list;
}
template <typename F>
auto for_each_fresh_block(F &&f) const {
- std::for_each(ool_block_list.begin(), ool_block_list.end(), f);
+ std::for_each(written_ool_block_list.begin(), written_ool_block_list.end(), f);
std::for_each(inline_block_list.begin(), inline_block_list.end(), f);
}
mutated_block_list.clear();
fresh_block_stats = {};
num_delayed_invalid_extents = 0;
+ num_allocated_invalid_extents = 0;
delayed_alloc_list.clear();
inline_block_list.clear();
- ool_block_list.clear();
+ written_ool_block_list.clear();
+ pre_alloc_list.clear();
retired_set.clear();
existing_block_list.clear();
existing_block_stats = {};
* Contains a reference (without a refcount) to every extent mutated
* as part of *this. No contained extent may be referenced outside
* of *this. Every contained extent will be in one of inline_block_list,
- * ool_block_list, mutated_block_list, or delayed_alloc_list.
+ * written_ool_block_list and/or pre_alloc_list, mutated_block_list,
+ * or delayed_alloc_list.
*/
ExtentIndex write_set;
*/
io_stat_t fresh_block_stats;
uint64_t num_delayed_invalid_extents = 0;
+ uint64_t num_allocated_invalid_extents = 0;
/// blocks that will be committed with journal record inline
std::list<CachedExtentRef> inline_block_list;
/// blocks that will be committed with out-of-line record
- std::list<CachedExtentRef> ool_block_list;
+ std::list<CachedExtentRef> written_ool_block_list;
/// blocks with delayed allocation, may become inline or ool above
std::list<LogicalCachedExtentRef> delayed_alloc_list;
+ /// Extents with pre-allocated addresses,
+ /// will be added to written_ool_block_list after write
+ std::list<LogicalCachedExtentRef> pre_alloc_list;
+
/// list of mutated blocks, holds refcounts, subset of write_set
std::list<CachedExtentRef> mutated_block_list;
std::move(delayed_paddrs),
[this, FNAME, &tref](auto& delayed_extents, auto& delayed_paddrs)
{
- return epm->delayed_alloc_or_ool_write(tref, delayed_extents
+ return epm->delayed_allocate_and_write(tref, delayed_extents
).si_then([this, FNAME, &tref, &delayed_extents, &delayed_paddrs] {
SUBTRACET(seastore_t, "update delayed extent mappings", tref);
return lba_manager->update_mappings(tref, delayed_extents, delayed_paddrs);
crimson::ct_error::assert_all("invalid error")
);
});
+ }).si_then([this, FNAME, &tref] {
+ auto allocated_extents = tref.get_valid_pre_alloc_list();
+ auto num_extents = allocated_extents.size();
+ SUBTRACET(seastore_t, "process {} allocated extents", tref, num_extents);
+ return epm->write_preallocated_ool_extents(tref, allocated_extents
+ ).handle_error_interruptible(
+ crimson::ct_error::input_output_error::pass_further(),
+ crimson::ct_error::assert_all("invalid error")
+ );
}).si_then([this, FNAME, &tref] {
SUBTRACET(seastore_t, "about to prepare", tref);
return tref.get_handle().enter(write_pipeline.prepare);