Move lba_manager->update_mapping() from epm to transaction manager.
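
With this change, ExtentPlacementManager and SegmentedAllocator no longer
depend on LBAManager: the transaction manager collects the still-valid
delayed extents, remembers their delayed paddrs, lets the epm perform the
out-of-line writes, and then updates the lba mappings itself. A rough
sketch of the resulting flow (simplified from the transaction_manager.cc
hunk below; do_with lifetimes and error handling omitted):

    auto delayed_extents = tref.get_delayed_alloc_list();
    std::vector<paddr_t> delayed_paddrs;
    for (auto& ext : delayed_extents) {
      // remember the pre-write (delayed) paddr of each extent
      delayed_paddrs.push_back(ext->get_paddr());
    }
    // epm performs the actual out-of-line writes ...
    return epm->delayed_alloc_or_ool_write(tref, delayed_extents
    ).si_then([&] {
      // ... then the transaction manager updates the lba mappings
      return lba_manager->update_mappings(tref, delayed_extents, delayed_paddrs);
    });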
Signed-off-by: Yingxin Cheng <yingxin.cheng@intel.com>
namespace crimson::os::seastore {
Cache::Cache(
- ExtentReader &reader)
+ ExtentReader &reader,
+ ExtentPlacementManager &epm)
: reader(reader),
+ epm(epm),
lru(crimson::common::get_conf<Option::size_t>(
"seastore_cache_lru_size"))
{
crimson::ct_error::input_output_error>;
using base_iertr = trans_iertr<base_ertr>;
- Cache(ExtentReader &reader);
+ Cache(ExtentReader &reader, ExtentPlacementManager &epm);
~Cache();
- void set_epm(ExtentPlacementManager& epm) {
- p_epm = &epm;
- }
-
/// Creates empty transaction by source
TransactionRef create_transaction(
Transaction::src_t src,
LOG_PREFIX(Cache::alloc_new_extent);
SUBTRACET(seastore_cache, "allocate {} {}B, hint={}",
t, T::TYPE, length, hint);
- auto result = p_epm->alloc_new_extent(t, T::TYPE, length, hint);
+ auto result = epm.alloc_new_extent(t, T::TYPE, length, hint);
auto ret = CachedExtent::make_cached_extent_ref<T>(std::move(result.bp));
ret->set_paddr(result.paddr);
t.add_fresh_extent(ret);
private:
ExtentReader &reader; ///< ref to extent reader
- ExtentPlacementManager* p_epm = nullptr;
+ ExtentPlacementManager& epm;
RootBlockRef root; ///< ref to current root
ExtentIndex extents; ///< set of live extents
#include "crimson/os/seastore/extent_placement_manager.h"
-#include "crimson/os/seastore/lba_manager.h"
#include "crimson/os/seastore/segment_cleaner.h"
namespace {
SegmentedAllocator::SegmentedAllocator(
SegmentProvider& sp,
SegmentManager& sm,
- LBAManager& lba_manager,
Journal& journal)
: segment_provider(sp),
segment_manager(sm),
- lba_manager(lba_manager),
journal(journal)
{
std::generate_n(
return Writer{
segment_provider,
segment_manager,
- lba_manager,
journal};
});
}
-SegmentedAllocator::Writer::finish_record_ret
-SegmentedAllocator::Writer::finish_write(
- Transaction& t,
- ool_record_t& record) {
- return trans_intr::do_for_each(record.get_extents(),
- [this, &t](auto& ool_extent) {
- LOG_PREFIX(SegmentedAllocator::Writer::finish_write);
- auto& lextent = ool_extent.get_lextent();
- DEBUGT("extent: {}, ool_paddr: {}",
- t,
- *lextent,
- ool_extent.get_ool_paddr());
- return lba_manager.update_mapping(
- t,
- lextent->get_laddr(),
- lextent->get_paddr(),
- ool_extent.get_ool_paddr()
- ).si_then([&ool_extent, &t, &lextent, this, FNAME] {
- lextent->hint = {};
- TRACET("mark extent as ool at {} -- {}", t, ool_extent.get_ool_paddr(), *lextent);
- t.mark_delayed_extent_ool(lextent, ool_extent.get_ool_paddr());
- return finish_record_iertr::now();
- });
- }).si_then([&record] {
- record.clear();
- });
-}
-
SegmentedAllocator::Writer::write_iertr::future<>
SegmentedAllocator::Writer::_write(
Transaction& t,
return trans_intr::make_interruptible(
current_segment->segment->write(record.get_base(), bl).safe_then(
- [this, pr=std::move(pr), &t,
- it=(--current_segment->inflight_writes.end()),
- cs=current_segment]() mutable {
- LOG_PREFIX(SegmentedAllocator::Writer::_write);
+ [this, FNAME, pr=std::move(pr), &t,
+ it=(--current_segment->inflight_writes.end()),
+ cs=current_segment]() mutable {
if (cs->outdated) {
DEBUGT("segment rolled", t);
pr.set_value();
}
return seastar::now();
})
- ).si_then([this, &record, &t]() mutable {
- return finish_write(t, record);
+ ).si_then([FNAME, &record, &t] {
+ for (auto& ool_extent : record.get_extents()) {
+ auto& lextent = ool_extent.get_lextent();
+ auto paddr = ool_extent.get_ool_paddr();
+      TRACET("ool extent written at {} -- {}", t, paddr, *lextent);
+ lextent->hint = {};
+ t.mark_delayed_extent_ool(lextent, paddr);
+ }
+ record.clear();
});
}
using open_segment_wrapper_ref =
boost::intrusive_ptr<open_segment_wrapper_t>;
-class LBAManager;
class SegmentProvider;
/**
Writer(
SegmentProvider& sp,
SegmentManager& sm,
- LBAManager& lba_manager,
Journal& journal)
: segment_provider(sp),
segment_manager(sm),
- lba_manager(lba_manager),
journal(journal)
{}
Writer(Writer &&) = default;
});
}
private:
- using finish_record_ertr = crimson::errorator<
- crimson::ct_error::input_output_error>;
- using finish_record_iertr = trans_iertr<finish_record_ertr>;
- using finish_record_ret = finish_record_iertr::future<>;
- finish_record_ret finish_write(
- Transaction& t,
- ool_record_t& record);
bool _needs_roll(seastore_off_t length) const;
write_iertr::future<> _write(
open_segment_wrapper_ref current_segment;
std::list<open_segment_wrapper_ref> open_segments;
seastore_off_t allocated_to = 0;
- LBAManager& lba_manager;
Journal& journal;
crimson::condition_variable segment_rotation_guard;
seastar::gate writer_guard;
SegmentedAllocator(
SegmentProvider& sp,
SegmentManager& sm,
- LBAManager& lba_manager,
Journal& journal);
Writer &get_writer(placement_hint_t hint) {
SegmentProvider& segment_provider;
SegmentManager& segment_manager;
std::vector<Writer> writers;
- LBAManager& lba_manager;
Journal& journal;
};
class ExtentPlacementManager {
public:
- ExtentPlacementManager(
- LBAManager& lba_manager
- ) : lba_manager(lba_manager) {}
+ ExtentPlacementManager() = default;
struct alloc_result_t {
paddr_t paddr;
/**
* delayed_alloc_or_ool_write
*
- * Performs any outstanding ool writes and updates pending lba updates
- * accordingly
+   * Performs delayed allocation and out-of-line writes for the given extents.
*/
using alloc_paddr_iertr = ExtentOolWriter::write_iertr;
alloc_paddr_iertr::future<> delayed_alloc_or_ool_write(
- Transaction& t) {
+ Transaction& t,
+ const std::list<LogicalCachedExtentRef>& delayed_extents) {
LOG_PREFIX(ExtentPlacementManager::delayed_alloc_or_ool_write);
- SUBDEBUGT(seastore_tm, "start", t);
+ SUBDEBUGT(seastore_tm, "start with {} delayed extents",
+ t, delayed_extents.size());
return seastar::do_with(
std::map<ExtentAllocator*, std::list<LogicalCachedExtentRef>>(),
- [this, &t](auto& alloc_map) {
- LOG_PREFIX(ExtentPlacementManager::delayed_alloc_or_ool_write);
- auto& alloc_list = t.get_delayed_alloc_list();
- uint64_t num_ool_extents = 0;
- for (auto& extent : alloc_list) {
- // extents may be invalidated
- if (!extent->is_valid()) {
- t.increment_delayed_invalid_extents();
- continue;
- }
+ [this, &t, &delayed_extents](auto& alloc_map) {
+ for (auto& extent : delayed_extents) {
// For now, just do ool allocation for any delayed extent
auto& allocator_ptr = get_allocator(
get_allocator_type(extent->hint), extent->hint
);
alloc_map[allocator_ptr.get()].emplace_back(extent);
- num_ool_extents++;
}
- SUBDEBUGT(seastore_tm, "{} ool extents", t, num_ool_extents);
return trans_intr::do_for_each(alloc_map, [&t](auto& p) {
auto allocator = p.first;
auto& extents = p.second;
return devices[std::rand() % devices.size()];
}
- LBAManager& lba_manager;
std::map<device_type_t, std::vector<ExtentAllocatorRef>> allocators;
};
using ExtentPlacementManagerRef = std::unique_ptr<ExtentPlacementManager>;
#include "crimson/os/seastore/lba_manager.h"
#include "crimson/os/seastore/lba_manager/btree/btree_lba_manager.h"
-namespace crimson::os::seastore::lba_manager {
+namespace crimson::os::seastore {
-LBAManagerRef create_lba_manager(
+LBAManager::update_mappings_ret
+LBAManager::update_mappings(
+ Transaction& t,
+ const std::list<LogicalCachedExtentRef>& extents,
+ const std::vector<paddr_t>& original_paddrs)
+{
+ assert(extents.size() == original_paddrs.size());
+ auto extents_end = extents.end();
+ return seastar::do_with(
+ extents.begin(),
+ original_paddrs.begin(),
+ [this, extents_end, &t](auto& iter_extents,
+ auto& iter_original_paddrs) {
+ return trans_intr::repeat(
+ [this, extents_end, &t, &iter_extents, &iter_original_paddrs]
+ {
+ if (extents_end == iter_extents) {
+ return update_mappings_iertr::make_ready_future<
+ seastar::stop_iteration>(seastar::stop_iteration::yes);
+ }
+ return update_mapping(
+ t,
+ (*iter_extents)->get_laddr(),
+ *iter_original_paddrs,
+ (*iter_extents)->get_paddr()
+ ).si_then([&iter_extents, &iter_original_paddrs] {
+ ++iter_extents;
+ ++iter_original_paddrs;
+ return seastar::stop_iteration::no;
+ });
+ });
+ });
+}
+
+LBAManagerRef lba_manager::create_lba_manager(
SegmentManager &segment_manager,
Cache &cache) {
return LBAManagerRef(new btree::BtreeLBAManager(segment_manager, cache));
laddr_t laddr,
paddr_t prev_addr,
paddr_t paddr) = 0;
+
+ /**
+ * update_mappings
+ *
+   * update lba mappings for delayed-allocated extents
+ */
+ using update_mappings_iertr = update_mapping_iertr;
+ using update_mappings_ret = update_mapping_ret;
+ update_mappings_ret update_mappings(
+ Transaction& t,
+ const std::list<LogicalCachedExtentRef>& extents,
+ const std::vector<paddr_t>& original_paddrs);
+
/**
* get_physical_extent_if_live
*
false /* detailed */);
auto journal = std::make_unique<Journal>(*sm, scanner_ref);
- auto cache = std::make_unique<Cache>(scanner_ref);
+ auto epm = std::make_unique<ExtentPlacementManager>();
+ auto cache = std::make_unique<Cache>(scanner_ref, *epm);
auto lba_manager = lba_manager::create_lba_manager(*sm, *cache);
- auto epm = std::make_unique<ExtentPlacementManager>(*lba_manager);
- cache->set_epm(*epm);
-
journal->set_segment_provider(&*segment_cleaner);
auto tm = std::make_unique<TransactionManager>(
return to_release;
}
- auto& get_delayed_alloc_list() {
- return delayed_alloc_list;
+ auto get_delayed_alloc_list() {
+ std::list<LogicalCachedExtentRef> ret;
+ for (auto& extent : delayed_alloc_list) {
+ // delayed extents may be invalidated
+ if (extent->is_valid()) {
+ ret.push_back(std::move(extent));
+ } else {
+ ++num_delayed_invalid_extents;
+ }
+ }
+ delayed_alloc_list.clear();
+ return ret;
}
const auto &get_mutated_block_list() {
return ool_write_stats;
}
- void increment_delayed_invalid_extents() {
- ++num_delayed_invalid_extents;
- }
-
private:
friend class Cache;
friend Ref make_test_transaction();
return trans_intr::make_interruptible(
tref.get_handle().enter(write_pipeline.ool_writes)
).then_interruptible([this, FNAME, &tref] {
- SUBTRACET(seastore_t, "process delayed and out-of-line extents", tref);
- return epm->delayed_alloc_or_ool_write(tref
- ).handle_error_interruptible(
- crimson::ct_error::input_output_error::pass_further(),
- crimson::ct_error::assert_all("invalid error")
- );
+ auto delayed_extents = tref.get_delayed_alloc_list();
+ auto num_extents = delayed_extents.size();
+ SUBTRACET(seastore_t, "process {} delayed extents", tref, num_extents);
+ std::vector<paddr_t> delayed_paddrs;
+ delayed_paddrs.reserve(num_extents);
+ for (auto& ext : delayed_extents) {
+ assert(ext->get_paddr().is_delayed());
+ delayed_paddrs.push_back(ext->get_paddr());
+ }
+ return seastar::do_with(
+ std::move(delayed_extents),
+ std::move(delayed_paddrs),
+ [this, FNAME, &tref](auto& delayed_extents, auto& delayed_paddrs)
+ {
+ return epm->delayed_alloc_or_ool_write(tref, delayed_extents
+ ).si_then([this, FNAME, &tref, &delayed_extents, &delayed_paddrs] {
+ SUBTRACET(seastore_t, "update delayed extent mappings", tref);
+ return lba_manager->update_mappings(tref, delayed_extents, delayed_paddrs);
+ }).handle_error_interruptible(
+ crimson::ct_error::input_output_error::pass_further(),
+ crimson::ct_error::assert_all("invalid error")
+ );
+ });
}).si_then([this, FNAME, &tref] {
SUBTRACET(seastore_t, "about to prepare", tref);
return tref.get_handle().enter(write_pipeline.prepare);
std::make_unique<SegmentedAllocator>(
*segment_cleaner,
*sm,
- *lba_manager,
*journal));
}
false /* detailed */);
std::vector<SegmentManager*> sms;
segment_cleaner->mount(segment_manager->get_device_id(), sms);
- auto journal = std::make_unique<Journal>(*segment_manager, scanner_ref);
- auto cache = std::make_unique<Cache>(scanner_ref);
+ auto journal = std::make_unique<Journal>(*segment_manager, *scanner);
+ auto epm = std::make_unique<ExtentPlacementManager>();
+ auto cache = std::make_unique<Cache>(scanner_ref, *epm);
auto lba_manager = lba_manager::create_lba_manager(*segment_manager, *cache);
- auto epm = std::make_unique<ExtentPlacementManager>(*cache, *lba_manager);
-
epm->add_allocator(
device_type_t::SEGMENTED,
std::make_unique<SegmentedAllocator>(
*segment_cleaner,
*segment_manager,
- *lba_manager,
- *journal,
- *cache));
+ *journal));
journal->set_segment_provider(&*segment_cleaner);
segment_manager::EphemeralSegmentManagerRef segment_manager;
ExtentReaderRef scanner;
JournalRef journal;
+ ExtentPlacementManagerRef epm;
CacheRef cache;
size_t block_size;
segment_manager = segment_manager::create_test_ephemeral();
scanner.reset(new ExtentReader());
journal.reset(new Journal(*segment_manager, *scanner));
- cache.reset(new Cache(*scanner));
+ epm.reset(new ExtentPlacementManager());
+ cache.reset(new Cache(*scanner, *epm));
block_size = segment_manager->get_block_size();
next = segment_id_t{segment_manager->get_device_id(), 0};
segment_manager.reset();
scanner.reset();
journal.reset();
+ epm.reset();
cache.reset();
}).handle_error(
crimson::ct_error::all_same_way([] {
struct cache_test_t : public seastar_test_suite_t {
segment_manager::EphemeralSegmentManagerRef segment_manager;
ExtentReaderRef reader;
+ ExtentPlacementManagerRef epm;
CacheRef cache;
paddr_t current;
journal_seq_t seq;
seastar::future<> set_up_fut() final {
segment_manager = segment_manager::create_test_ephemeral();
reader.reset(new ExtentReader());
- cache.reset(new Cache(*reader));
+ epm.reset(new ExtentPlacementManager());
+ cache.reset(new Cache(*reader, *epm));
current = paddr_t::make_seg_paddr(segment_id_t(segment_manager->get_device_id(), 0), 0);
reader->add_segment_manager(segment_manager.get());
return segment_manager->init(
).safe_then([this] {
segment_manager.reset();
reader.reset();
+ epm.reset();
cache.reset();
}).handle_error(
Cache::close_ertr::assert_all{}
std::move(scanner),
true);
auto journal = std::make_unique<Journal>(segment_manager, scanner_ref);
- auto cache = std::make_unique<Cache>(scanner_ref);
+ auto epm = std::make_unique<ExtentPlacementManager>();
+ auto cache = std::make_unique<Cache>(scanner_ref, *epm);
auto lba_manager = lba_manager::create_lba_manager(segment_manager, *cache);
- auto epm = std::make_unique<ExtentPlacementManager>(*cache, *lba_manager);
-
epm->add_allocator(
device_type_t::SEGMENTED,
std::make_unique<SegmentedAllocator>(
*segment_cleaner,
segment_manager,
- *lba_manager,
- *journal,
- *cache));
+ *journal));
journal->set_segment_provider(&*segment_cleaner);