void Cache::complete_commit(
Transaction &t,
paddr_t final_block_start,
- journal_seq_t start_seq,
- AsyncCleaner *cleaner)
+ journal_seq_t start_seq)
{
LOG_PREFIX(Cache::complete_commit);
SUBTRACET(seastore_t, "final_block_start={}, start_seq={}",
t, final_block_start, start_seq);
const auto t_src = t.get_src();
add_extent(i, &t_src);
- if (cleaner) {
- cleaner->mark_space_used(
- i->get_paddr(),
- i->get_length());
- }
+ epm.mark_space_used(i->get_paddr(), i->get_length());
if (is_backref_mapped_extent_node(i)) {
DEBUGT("backref_list new {} len {}",
t, i->get_paddr(), i->get_length());
}
}
- if (cleaner) {
- for (auto &i: t.retired_set) {
- cleaner->mark_space_free(
- i->get_paddr(),
- i->get_length());
- }
- for (auto &i: t.existing_block_list) {
- if (i->is_valid()) {
- cleaner->mark_space_used(
- i->get_paddr(),
- i->get_length());
- }
+ for (auto &i: t.retired_set) {
+ epm.mark_space_free(i->get_paddr(), i->get_length());
+ }
+ for (auto &i: t.existing_block_list) {
+ if (i->is_valid()) {
+ epm.mark_space_used(i->get_paddr(), i->get_length());
}
}
namespace crimson::os::seastore {
class BackrefManager;
-class AsyncCleaner;
class SegmentProvider;
struct backref_entry_t {
void complete_commit(
Transaction &t, ///< [in, out] current transaction
paddr_t final_block_start, ///< [in] offset of initial block
- journal_seq_t seq, ///< [in] journal commit seq
- AsyncCleaner *cleaner=nullptr ///< [out] optional segment stat listener
+ journal_seq_t seq ///< [in] journal commit seq
);
/**
});
}
+void ExtentPlacementManager::set_async_cleaner(AsyncCleanerRef &&_cleaner)
+{
+ cleaner = std::move(_cleaner);
+ writer_refs.clear();
+
+ ceph_assert(RECLAIM_GENERATIONS > 0);
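+ // One out-of-line data writer is created per reclaim generation.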
+ data_writers_by_gen.resize(RECLAIM_GENERATIONS, {});
+ for (reclaim_gen_t gen = 0; gen < RECLAIM_GENERATIONS; ++gen) {
+ writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
+ data_category_t::DATA, gen, *cleaner,
+ cleaner->get_ool_segment_seq_allocator()));
+ data_writers_by_gen[gen] = writer_refs.back().get();
+ }
+
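+ // Generation-0 metadata is expected to stay inline in the journal, so the
+ // out-of-line metadata writers only cover generations [1, RECLAIM_GENERATIONS).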
+ md_writers_by_gen.resize(RECLAIM_GENERATIONS - 1, {});
+ for (reclaim_gen_t gen = 1; gen < RECLAIM_GENERATIONS; ++gen) {
+ writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
+ data_category_t::METADATA, gen, *cleaner,
+ cleaner->get_ool_segment_seq_allocator()));
+ md_writers_by_gen[gen - 1] = writer_refs.back().get();
+ }
+
+ for (auto *device : cleaner->get_segment_manager_group()
+ ->get_segment_managers()) {
+ add_device(device);
+ }
+}
+
+void ExtentPlacementManager::set_primary_device(Device *device)
+{
+ ceph_assert(primary_device == nullptr);
+ primary_device = device;
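+ // A segmented primary is already registered via set_async_cleaner(); an
+ // RBM primary is unknown to the cleaner and is added here instead.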
+ if (device->get_device_type() == device_type_t::SEGMENTED) {
+ prefer_ool = false;
+ ceph_assert(devices_by_id[device->get_device_id()] == device);
+ } else {
+ // RBM device is not in the cleaner.
+ ceph_assert(device->get_device_type() == device_type_t::RANDOM_BLOCK);
+ prefer_ool = true;
+ add_device(primary_device);
+ }
+}
+
}
#include "seastar/core/gate.hh"
+#include "crimson/os/seastore/async_cleaner.h"
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/journal/segment_allocator.h"
#include "crimson/os/seastore/logging.h"
};
using ExtentOolWriterRef = std::unique_ptr<ExtentOolWriter>;
-class SegmentProvider;
-
/**
* SegmentedOolWriter
*
devices_by_id.resize(DEVICE_ID_MAX, nullptr);
}
- void init_ool_writers(SegmentProvider &sp, SegmentSeqAllocator &ssa) {
- // Currently only one SegmentProvider is supported
- writer_refs.clear();
-
- ceph_assert(RECLAIM_GENERATIONS > 0);
- data_writers_by_gen.resize(RECLAIM_GENERATIONS, {});
- for (reclaim_gen_t gen = 0; gen < RECLAIM_GENERATIONS; ++gen) {
- writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
- data_category_t::DATA, gen, sp, ssa));
- data_writers_by_gen[gen] = writer_refs.back().get();
- }
+ // TODO: device tiering
+ void set_async_cleaner(AsyncCleanerRef &&_cleaner);
- md_writers_by_gen.resize(RECLAIM_GENERATIONS - 1, {});
- for (reclaim_gen_t gen = 1; gen < RECLAIM_GENERATIONS; ++gen) {
- writer_refs.emplace_back(std::make_unique<SegmentedOolWriter>(
- data_category_t::METADATA, gen, sp, ssa));
- md_writers_by_gen[gen - 1] = writer_refs.back().get();
- }
- }
+ void set_primary_device(Device *device);
- void add_device(Device* device, bool is_primary) {
- auto device_id = device->get_device_id();
- ceph_assert(devices_by_id[device_id] == nullptr);
- devices_by_id[device_id] = device;
- ++num_devices;
- if (is_primary) {
- ceph_assert(primary_device == nullptr);
- primary_device = device;
- if (device->get_device_type() == device_type_t::SEGMENTED) {
- prefer_ool = false;
- } else {
- ceph_assert(device->get_device_type() == device_type_t::RANDOM_BLOCK);
- prefer_ool = true;
- }
- }
+ void set_extent_callback(AsyncCleaner::ExtentCallbackInterface *cb) {
+ cleaner->set_extent_callback(cb);
}
seastore_off_t get_block_size() const {
return primary_device->get_block_size();
}
+ store_statfs_t get_stat() const {
+ return cleaner->stat();
+ }
+
+ using mount_ret = AsyncCleaner::mount_ret;
+ mount_ret mount() {
+ return cleaner->mount();
+ }
+
using open_ertr = ExtentOolWriter::open_ertr;
- open_ertr::future<> open() {
- LOG_PREFIX(ExtentPlacementManager::open);
+ open_ertr::future<> open_for_write() {
+ LOG_PREFIX(ExtentPlacementManager::open_for_write);
SUBINFO(seastore_journal, "started with {} devices", num_devices);
ceph_assert(primary_device != nullptr);
});
}
+ void start_scan_space() {
+ return cleaner->start_scan_space();
+ }
+
+ void start_gc() {
+ return cleaner->start_gc();
+ }
+
struct alloc_result_t {
paddr_t paddr;
bufferptr bp;
LOG_PREFIX(ExtentPlacementManager::delayed_alloc_or_ool_write);
SUBDEBUGT(seastore_journal, "start with {} delayed extents",
t, delayed_extents.size());
+ assert(writer_refs.size());
return seastar::do_with(
std::map<ExtentOolWriter*, std::list<LogicalCachedExtentRef>>(),
[this, &t, &delayed_extents](auto& alloc_map) {
});
}
+ seastar::future<> stop_gc() {
+ return cleaner->stop();
+ }
+
using close_ertr = ExtentOolWriter::close_ertr;
close_ertr::future<> close() {
LOG_PREFIX(ExtentPlacementManager::close);
return devices_by_id[addr.get_device_id()]->read(addr, len, out);
}
+ void mark_space_used(paddr_t addr, extent_len_t len) {
+ // TODO: improve tests to drop the cleaner check
+ if (cleaner) {
+ cleaner->mark_space_used(addr, len);
+ }
+ }
+
+ void mark_space_free(paddr_t addr, extent_len_t len) {
+ // TODO: improve tests to drop the cleaner check
+ if (cleaner) {
+ cleaner->mark_space_free(addr, len);
+ }
+ }
+
+ seastar::future<> reserve_projected_usage(std::size_t projected_usage) {
+ return cleaner->reserve_projected_usage(projected_usage);
+ }
+
+ void release_projected_usage(std::size_t projected_usage) {
+ return cleaner->release_projected_usage(projected_usage);
+ }
+
+ // Testing interfaces
+
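+ // Minimal single-device setup for tests that run without an AsyncCleaner
+ // (no background cleaning).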
+ void test_init_no_background(Device *test_device) {
+ assert(test_device->get_device_type() == device_type_t::SEGMENTED);
+ add_device(test_device);
+ set_primary_device(test_device);
+ }
+
+ bool check_usage() {
+ return cleaner->check_usage();
+ }
+
+ seastar::future<> run_background_work_until_halt() {
+ return cleaner->run_until_halt();
+ }
+
private:
+ void add_device(Device *device) {
+ auto device_id = device->get_device_id();
+ ceph_assert(devices_by_id[device_id] == nullptr);
+ devices_by_id[device_id] = device;
+ ++num_devices;
+ }
+
ExtentOolWriter* get_writer(placement_hint_t hint,
data_category_t category,
reclaim_gen_t gen) {
std::vector<Device*> devices_by_id;
Device* primary_device = nullptr;
std::size_t num_devices = 0;
+
+ // TODO: device tiering
+ AsyncCleanerRef cleaner;
};
+
using ExtentPlacementManagerRef = std::unique_ptr<ExtentPlacementManager>;
}
namespace crimson::os::seastore {
TransactionManager::TransactionManager(
- AsyncCleanerRef _async_cleaner,
JournalRef _journal,
CacheRef _cache,
LBAManagerRef _lba_manager,
- ExtentPlacementManagerRef &&epm,
- BackrefManagerRef&& backref_manager)
- : async_cleaner(std::move(_async_cleaner)),
- cache(std::move(_cache)),
+ ExtentPlacementManagerRef &&_epm,
+ BackrefManagerRef&& _backref_manager)
+ : cache(std::move(_cache)),
lba_manager(std::move(_lba_manager)),
journal(std::move(_journal)),
- epm(std::move(epm)),
- backref_manager(std::move(backref_manager))
+ epm(std::move(_epm)),
+ backref_manager(std::move(_backref_manager))
{
- async_cleaner->set_extent_callback(this);
+ epm->set_extent_callback(this);
journal->set_write_pipeline(&write_pipeline);
}
{
LOG_PREFIX(TransactionManager::mkfs);
INFO("enter");
- return async_cleaner->mount(
+ return epm->mount(
).safe_then([this] {
return journal->open_for_mkfs();
}).safe_then([this](auto start_seq) {
journal->get_trimmer().update_journal_tails(start_seq, start_seq);
journal->get_trimmer().set_journal_head(start_seq);
- return epm->open();
+ return epm->open_for_write();
}).safe_then([this, FNAME]() {
return with_transaction_intr(
Transaction::src_t::MUTATE,
LOG_PREFIX(TransactionManager::mount);
INFO("enter");
cache->init();
- return async_cleaner->mount(
+ return epm->mount(
).safe_then([this] {
return journal->replay(
[this](
return lba_manager->init_cached_extent(t, e);
}
}).si_then([this, &t] {
- async_cleaner->start_scan_space();
+ epm->start_scan_space();
return backref_manager->scan_mapped_space(
t,
[this](
assert(laddr == L_ADDR_NULL);
backref_manager->cache_new_backref_extent(paddr, type);
cache->update_tree_extents_num(type, 1);
- async_cleaner->mark_space_used(paddr, len);
+ epm->mark_space_used(paddr, len);
} else if (laddr == L_ADDR_NULL) {
cache->update_tree_extents_num(type, -1);
- async_cleaner->mark_space_free(paddr, len);
+ epm->mark_space_free(paddr, len);
} else {
cache->update_tree_extents_num(type, 1);
- async_cleaner->mark_space_used(paddr, len);
+ epm->mark_space_used(paddr, len);
}
});
});
});
}).safe_then([this] {
- return epm->open();
+ return epm->open_for_write();
}).safe_then([FNAME, this] {
- async_cleaner->start_gc();
+ epm->start_gc();
INFO("completed");
}).handle_error(
mount_ertr::pass_further{},
TransactionManager::close_ertr::future<> TransactionManager::close() {
LOG_PREFIX(TransactionManager::close);
INFO("enter");
- return async_cleaner->stop(
+ return epm->stop_gc(
).then([this] {
return cache->close();
}).safe_then([this] {
size_t projected_usage = t.get_allocation_size();
SUBTRACET(seastore_t, "waiting for projected_usage: {}", t, projected_usage);
return trans_intr::make_interruptible(
- async_cleaner->reserve_projected_usage(projected_usage)
+ epm->reserve_projected_usage(projected_usage)
).then_interruptible([this, &t] {
return submit_transaction_direct(t);
}).finally([this, FNAME, projected_usage, &t] {
SUBTRACET(seastore_t, "releasing projected_usage: {}", t, projected_usage);
- async_cleaner->release_projected_usage(projected_usage);
+ epm->release_projected_usage(projected_usage);
});
});
}
cache->complete_commit(
tref,
submit_result.record_block_base,
- start_seq,
- async_cleaner.get());
+ start_seq);
std::vector<CachedExtentRef> lba_to_clear;
std::vector<CachedExtentRef> backref_to_clear;
auto sms = std::make_unique<SegmentManagerGroup>();
auto backref_manager = create_backref_manager(*cache);
- epm->add_device(primary_device, true);
if (primary_device->get_device_type() == device_type_t::SEGMENTED) {
sms->add_segment_manager(static_cast<SegmentManager*>(primary_device));
}
for (auto &p_dev : secondary_devices) {
- epm->add_device(p_dev, false);
ceph_assert(p_dev->get_device_type() == device_type_t::SEGMENTED);
sms->add_segment_manager(static_cast<SegmentManager*>(p_dev));
}
ERROR("disabling journal trimming since support for CircularBoundedJournal "
"hasn't been added yet");
}
- epm->init_ool_writers(
- *async_cleaner,
- async_cleaner->get_ool_segment_seq_allocator());
+
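+ // Hand the cleaner to the EPM before set_primary_device(), so that a
+ // segmented primary is already registered with the EPM at that point.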
+ epm->set_async_cleaner(std::move(async_cleaner));
+ epm->set_primary_device(primary_device);
return std::make_unique<TransactionManager>(
- std::move(async_cleaner),
std::move(journal),
std::move(cache),
std::move(lba_manager),
#include "crimson/osd/exceptions.h"
#include "crimson/os/seastore/logging.h"
-#include "crimson/os/seastore/async_cleaner.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/lba_manager.h"
using base_iertr = Cache::base_iertr;
TransactionManager(
- AsyncCleanerRef async_cleaner,
JournalRef journal,
CacheRef cache,
LBAManagerRef lba_manager,
}
store_statfs_t store_stat() const {
- return async_cleaner->stat();
+ return epm->get_stat();
}
~TransactionManager();
private:
friend class Transaction;
- AsyncCleanerRef async_cleaner;
CacheRef cache;
LBAManagerRef lba_manager;
JournalRef journal;
public:
// Testing interfaces
- auto get_async_cleaner() {
- return async_cleaner.get();
+ auto get_epm() {
+ return epm.get();
}
auto get_lba_manager() {
});
});
}).unsafe_get0();
- async_cleaner->run_until_halt().get0();
+ epm->run_background_work_until_halt().get0();
// insert
logger().warn("start inserting {} kvs ...", kvs.size());
});
});
}).unsafe_get0();
- async_cleaner->run_until_halt().get0();
+ epm->run_background_work_until_halt().get0();
++iter;
}
}
});
});
}).unsafe_get0();
- async_cleaner->run_until_halt().get0();
+ epm->run_background_work_until_halt().get0();
++iter;
}
kvs.erase_from_random(kvs.random_begin(), kvs.random_end());
block_size = segment_manager->get_block_size();
next = segment_id_t{segment_manager->get_device_id(), 0};
sms->add_segment_manager(segment_manager.get());
- epm->add_device(segment_manager.get(), true);
+ epm->test_init_no_background(segment_manager.get());
journal->set_write_pipeline(&pipeline);
return journal->open_for_mkfs().discard_result();
}).safe_then([this] {
dummy_tail = journal_seq_t{0,
paddr_t::make_seg_paddr(segment_id_t(segment_manager->get_device_id(), 0), 0)};
- return epm->open();
+ return epm->open_for_write();
}).safe_then([this] {
return seastar::do_with(
cache->create_transaction(
epm.reset(new ExtentPlacementManager());
cache.reset(new Cache(*epm));
current = paddr_t::make_seg_paddr(segment_id_t(segment_manager->get_device_id(), 0), 0);
- epm->add_device(segment_manager.get(), true);
+ epm->test_init_no_background(segment_manager.get());
return seastar::do_with(
get_transaction(),
[this](auto &ref_t) {
#include "test/crimson/gtest_seastar.h"
#include "test/crimson/seastore/transaction_manager_test_state.h"
-#include "crimson/os/seastore/async_cleaner.h"
#include "crimson/os/seastore/cache.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/segment_manager/ephemeral.h"
}
bool check_usage() {
- return async_cleaner->check_usage();
+ return epm->check_usage();
}
void replay() {
"try_submit_transaction hit invalid error"
}
).then([this](auto ret) {
- return async_cleaner->run_until_halt().then([ret] { return ret; });
+ return epm->run_background_work_until_halt(
+ ).then([ret] { return ret; });
}).get0();
if (success) {
});
});
}).safe_then([this]() {
- return async_cleaner->run_until_halt();
+ return epm->run_background_work_until_halt();
}).handle_error(
crimson::ct_error::assert_all{
"Invalid error in SeaStore::list_collections"
#include <random>
#include <boost/iterator/counting_iterator.hpp>
-#include "crimson/os/seastore/async_cleaner.h"
#include "crimson/os/seastore/cache.h"
+#include "crimson/os/seastore/extent_placement_manager.h"
#include "crimson/os/seastore/logging.h"
#include "crimson/os/seastore/transaction_manager.h"
#include "crimson/os/seastore/segment_manager/ephemeral.h"
protected:
TransactionManagerRef tm;
LBAManager *lba_manager;
- BackrefManager *backref_manager;
Cache* cache;
- AsyncCleaner *async_cleaner;
+ ExtentPlacementManager *epm;
uint64_t seq = 0;
TMTestState() : EphemeralTestState(1) {}
} else {
tm = make_transaction_manager(segment_manager.get(), sec_devices, true);
}
- async_cleaner = tm->get_async_cleaner();
+ epm = tm->get_epm();
lba_manager = tm->get_lba_manager();
- backref_manager = tm->get_backref_manager();
cache = tm->get_cache();
}
virtual void _destroy() override {
- async_cleaner = nullptr;
+ epm = nullptr;
lba_manager = nullptr;
tm.reset();
}
).handle_error(
crimson::ct_error::assert_all{"Error in mount"}
).then([this] {
- return async_cleaner->stop();
+ return epm->stop_gc();
}).then([this] {
- return async_cleaner->run_until_halt();
+ return epm->run_background_work_until_halt();
});
}
void submit_transaction(TransactionRef t) {
submit_transaction_fut(*t).unsafe_get0();
- async_cleaner->run_until_halt().get0();
+ epm->run_background_work_until_halt().get0();
}
};