namespace crimson::os::seastore {
SegmentedAllocator::SegmentedAllocator(
- SegmentProvider& sp,
- SegmentManager& sm,
+ SegmentProvider &sp,
SegmentSeqAllocator &ssa)
- : cold_writer{"COLD", sp, sm, ssa},
- rewrite_writer{"REWRITE", sp, sm, ssa}
-{}
+ : cold_writer{"COLD", sp, ssa},
+ rewrite_writer{"REWRITE", sp, ssa}
+{
+}
SegmentedAllocator::Writer::Writer(
std::string name,
SegmentProvider& sp,
- SegmentManager& sm,
SegmentSeqAllocator &ssa)
- : segment_allocator(name, segment_type_t::OOL, sp, sm, ssa),
+ : segment_allocator(name, segment_type_t::OOL, sp, ssa),
record_submitter(crimson::common::get_conf<uint64_t>(
"seastore_journal_iodepth_limit"),
crimson::common::get_conf<uint64_t>(
class Writer : public ExtentOolWriter {
public:
Writer(std::string name,
- SegmentProvider& sp,
- SegmentManager& sm,
+ SegmentProvider &sp,
SegmentSeqAllocator &ssa);
Writer(Writer &&) = default;
};
public:
SegmentedAllocator(
- SegmentProvider& sp,
- SegmentManager& sm,
+ SegmentProvider &sp,
SegmentSeqAllocator &ssa);
Writer &get_writer(placement_hint_t hint) {
namespace crimson::os::seastore::journal {
JournalRef make_segmented(
- SegmentManager &sm,
SegmentManagerGroup &sms,
SegmentProvider &provider)
{
- return std::make_unique<SegmentedJournal>(sm, sms, provider);
+ return std::make_unique<SegmentedJournal>(sms, provider);
}
}
class NVMeBlockDevice;
}
-class SegmentManager;
class SegmentManagerGroup;
class SegmentProvider;
namespace journal {
JournalRef make_segmented(
- SegmentManager &sm,
SegmentManagerGroup &sms,
SegmentProvider &provider);
std::string name,
segment_type_t type,
SegmentProvider &sp,
- SegmentManager &sm,
SegmentSeqAllocator &ssa)
: name{name},
print_name{fmt::format("D?_{}", name)},
type{type},
segment_provider{sp},
- segment_manager{sm},
+ sm_group{*sp.get_segment_manager_group()},
segment_seq_allocator(ssa)
{
ceph_assert(type != segment_type_t::NULL_SEG);
ceph_assert(!current_segment);
segment_seq_t new_segment_seq =
segment_seq_allocator.get_and_inc_next_segment_seq();
- auto meta = segment_manager.get_meta();
+ auto meta = sm_group.get_meta();
current_segment_nonce = ceph_crc32c(
new_segment_seq,
reinterpret_cast<const unsigned char *>(meta.seastore_id.bytes()),
sizeof(meta.seastore_id.uuid));
- auto new_segment_id = segment_provider.get_segment(
- get_device_id(), new_segment_seq, type);
- return segment_manager.open(new_segment_id
+ auto new_segment_id = segment_provider.get_segment(new_segment_seq, type);
+ ceph_assert(new_segment_id != NULL_SEG_ID);
+ return sm_group.open(new_segment_id
).handle_error(
open_ertr::pass_further{},
crimson::ct_error::assert_all{
INFO("{} writing header to new segment ... -- {}",
print_name, header);
- auto header_length = segment_manager.get_block_size();
+ auto header_length = get_block_size();
bufferlist bl;
encode(header, bl);
bufferptr bp(ceph::buffer::create_page_aligned(header_length));
SegmentAllocator::open()
{
LOG_PREFIX(SegmentAllocator::open);
- print_name = fmt::format("D{}_{}",
- device_id_printer_t{get_device_id()},
- name);
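+  // compose a print name that lists every device id in the group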
+ auto& device_ids = sm_group.get_device_ids();
+ ceph_assert(device_ids.size());
+ std::ostringstream oss;
+ oss << "D";
+ for (auto& device_id : device_ids) {
+ oss << "_" << device_id_printer_t{device_id};
+ }
+ oss << "_" << name;
+ print_name = oss.str();
+
INFO("{}", print_name);
return do_open();
}
};
TRACE("{} {}~{}", print_name, write_start_seq, write_length);
assert(write_length > 0);
- assert((write_length % segment_manager.get_block_size()) == 0);
+ assert((write_length % get_block_size()) == 0);
assert(!needs_roll(write_length));
auto write_result = write_result_t{
current_segment_nonce,
tail.journal_tail);
- bufferptr bp(
- ceph::buffer::create_page_aligned(
- segment_manager.get_block_size()));
+ bufferptr bp(ceph::buffer::create_page_aligned(get_block_size()));
bp.zero();
auto iter = bl.cbegin();
iter.copy(bl.length(), bp.c_str());
bl.clear();
bl.append(bp);
- assert(bl.length() ==
- (size_t)segment_manager.get_rounded_tail_length());
+ assert(bl.length() == sm_group.get_rounded_tail_length());
return seg_to_close->write(
- segment_manager.get_segment_size()
- - segment_manager.get_rounded_tail_length(),
+ sm_group.get_segment_size() - sm_group.get_rounded_tail_length(),
bl
).safe_then([seg_to_close=std::move(seg_to_close)] {
return seg_to_close->close();
#include "include/buffer.h"
#include "crimson/common/errorator.h"
-#include "crimson/os/seastore/segment_manager.h"
+#include "crimson/os/seastore/segment_manager_group.h"
#include "crimson/os/seastore/segment_seq_allocator.h"
namespace crimson::os::seastore {
SegmentAllocator(std::string name,
segment_type_t type,
SegmentProvider &sp,
- SegmentManager &sm,
SegmentSeqAllocator &ssa);
const std::string& get_name() const {
return print_name;
}
- device_id_t get_device_id() const {
- return segment_manager.get_device_id();
- }
-
seastore_off_t get_block_size() const {
- return segment_manager.get_block_size();
+ return sm_group.get_block_size();
}
extent_len_t get_max_write_length() const {
- return segment_manager.get_segment_size() -
- p2align(ceph::encoded_sizeof_bounded<segment_header_t>(),
- size_t(segment_manager.get_block_size()));
- }
-
- device_segment_id_t get_num_segments() const {
- return segment_manager.get_num_segments();
+ return sm_group.get_segment_size() -
+ sm_group.get_rounded_header_length() -
+ sm_group.get_rounded_tail_length();
}
bool can_write() const {
// returns true iff the current segment has insufficient space
bool needs_roll(std::size_t length) const {
assert(can_write());
- auto write_capacity = current_segment->get_write_capacity()
- - segment_manager.get_rounded_tail_length();
+ assert(current_segment->get_write_capacity() ==
+ sm_group.get_segment_size());
+ auto write_capacity = current_segment->get_write_capacity() -
+ sm_group.get_rounded_tail_length();
return length + written_to > std::size_t(write_capacity);
}
std::string print_name;
const segment_type_t type; // JOURNAL or OOL
SegmentProvider &segment_provider;
- SegmentManager &segment_manager;
+ SegmentManagerGroup &sm_group;
SegmentRef current_segment;
seastore_off_t written_to;
SegmentSeqAllocator &segment_seq_allocator;
namespace crimson::os::seastore::journal {
SegmentedJournal::SegmentedJournal(
- SegmentManager &segment_manager,
SegmentManagerGroup &sms,
SegmentProvider &segment_provider)
: segment_provider(segment_provider),
journal_segment_allocator("JOURNAL",
segment_type_t::JOURNAL,
segment_provider,
- segment_manager,
- *segment_seq_allocator),
+ *segment_seq_allocator),
record_submitter(crimson::common::get_conf<uint64_t>(
"seastore_journal_iodepth_limit"),
crimson::common::get_conf<uint64_t>(
std::for_each(
segments.begin(),
segments.end(),
- [this, FNAME](auto &seg)
+ [FNAME](auto &seg)
{
if (seg.first != seg.second.physical_segment_id ||
- seg.first.device_id() != journal_segment_allocator.get_device_id() ||
seg.second.get_type() != segment_type_t::JOURNAL) {
ERROR("illegal journal segment for replay -- {}", seg.second);
ceph_abort();
);
}
-SegmentedJournal::find_journal_segments_ret
-SegmentedJournal::find_journal_segments()
-{
- return seastar::do_with(
- find_journal_segments_ret_bare{},
- [this](auto &ret) -> find_journal_segments_ret {
- return crimson::do_for_each(
- boost::counting_iterator<device_segment_id_t>(0),
- boost::counting_iterator<device_segment_id_t>(
- journal_segment_allocator.get_num_segments()),
- [this, &ret](device_segment_id_t d_segment_id) {
- segment_id_t segment_id{
- journal_segment_allocator.get_device_id(),
- d_segment_id};
- return sms.read_segment_header(
- segment_id
- ).safe_then([segment_id, &ret](auto &&header) {
- if (header.get_type() == segment_type_t::JOURNAL) {
- ret.emplace_back(std::make_pair(segment_id, std::move(header)));
- }
- }).handle_error(
- crimson::ct_error::enoent::handle([](auto) {
- return find_journal_segments_ertr::now();
- }),
- crimson::ct_error::enodata::handle([](auto) {
- return find_journal_segments_ertr::now();
- }),
- crimson::ct_error::input_output_error::pass_further{}
- );
- }).safe_then([&ret]() mutable {
- return find_journal_segments_ret{
- find_journal_segments_ertr::ready_future_marker{},
- std::move(ret)};
- });
- });
-}
-
SegmentedJournal::replay_ret SegmentedJournal::replay(
delta_handler_t &&delta_handler)
{
LOG_PREFIX(Journal::replay);
- return find_journal_segments(
+ return sms.find_journal_segment_headers(
).safe_then([this, FNAME, delta_handler=std::move(delta_handler)]
(auto &&segment_headers) mutable -> replay_ret {
INFO("got {} segments", segment_headers.size());
class SegmentedJournal : public Journal {
public:
SegmentedJournal(
- SegmentManager &segment_manager,
SegmentManagerGroup& sms,
SegmentProvider& cleaner);
~SegmentedJournal() {}
SegmentManagerGroup& sms;
WritePipeline* write_pipeline = nullptr;
- /// read journal segment headers from sms
- using find_journal_segments_ertr = crimson::errorator<
- crimson::ct_error::input_output_error>;
- using find_journal_segments_ret_bare = std::vector<
- std::pair<segment_id_t, segment_header_t>>;
- using find_journal_segments_ret = find_journal_segments_ertr::future<
- find_journal_segments_ret_bare>;
- find_journal_segments_ret find_journal_segments();
-
/// return ordered vector of segments to replay
using replay_segments_t = std::vector<
std::pair<journal_seq_t, segment_header_t>>;
return Device::make_device(
device
).then([&device](DeviceRef device_obj) {
- auto tm = make_transaction_manager(*device_obj, false /* detailed */);
+ auto tm = make_transaction_manager(false /* detailed */);
auto cm = std::make_unique<collection_manager::FlatCollectionManager>(*tm);
return std::make_unique<SeaStore>(
device,
}
segment_id_t SegmentCleaner::get_segment(
- device_id_t device_id,
segment_seq_t seq,
segment_type_t type)
{
LOG_PREFIX(SegmentCleaner::get_segment);
assert(seq != NULL_SEG_SEQ);
- for (auto it = segments.device_begin(device_id);
- it != segments.device_end(device_id);
+ for (auto it = segments.begin();
+ it != segments.end();
++it) {
auto seg_id = it->first;
auto& segment_info = it->second;
return seg_id;
}
}
- ERROR("(TODO) handle out of space from device {} with segment_seq={}",
- device_id, segment_seq_printer_t{seq});
+ ERROR("out of space with segment_seq={}", segment_seq_printer_t{seq});
ceph_abort();
return NULL_SEG_ID;
}
});
}
-SegmentCleaner::mount_ret SegmentCleaner::mount(
- device_id_t pdevice_id)
+SegmentCleaner::mount_ret SegmentCleaner::mount()
{
const auto& sms = sm_group->get_segment_managers();
logger().debug(
journal_tail_target = JOURNAL_SEQ_NULL;
journal_tail_committed = JOURNAL_SEQ_NULL;
journal_head = JOURNAL_SEQ_NULL;
- journal_device_id = pdevice_id;
space_tracker.reset(
detailed ?
}
return seastar::now();
}),
- [&cursor, header, segment_id, this](auto& handler) {
+ [&cursor, header, this](auto& handler) {
return sm_group->scan_valid_records(
cursor,
header.segment_nonce,
- segments[segment_id.device_id()]->segment_size,
+ segments.get_segment_size(),
handler);
}
);
segment_manager_info_t(
device_id_t device_id,
device_segment_id_t num_segments,
- seastore_off_t segment_size,
seastore_off_t block_size,
size_t empty_segments,
size_t size)
: device_id(device_id),
num_segments(num_segments),
- segment_size(segment_size),
block_size(block_size),
empty_segments(empty_segments),
size(size),
device_id_t device_id = 0;
device_segment_id_t num_segments = 0;
- seastore_off_t segment_size = 0;
seastore_off_t block_size = 0;
size_t empty_segments = 0;
size_t size = 0;
journal_segments = 0;
avail_bytes = 0;
opened_segments = 0;
+ segment_size = 0;
}
void add_segment_manager(SegmentManager& segment_manager)
{
device_id_t d_id = segment_manager.get_device_id();
+ auto ssize = segment_manager.get_segment_size();
+ ceph_assert(ssize != 0);
segments.add_device(
d_id,
segment_manager.get_num_segments(),
sm_infos[segment_manager.get_device_id()].emplace(
d_id,
segment_manager.get_num_segments(),
- segment_manager.get_segment_size(),
segment_manager.get_block_size(),
segment_manager.get_num_segments(),
segment_manager.get_size());
total_bytes += segment_manager.get_size();
avail_bytes += segment_manager.get_size();
+
+  // assume all the segment managers share the same settings (currently the segment size).
+ if (segment_size == 0) {
+ segment_size = ssize;
+ } else {
+ ceph_assert(segment_size == ssize);
+ }
}
device_segment_id_t size() const {
return segments.end();
}
- auto device_begin(device_id_t id) {
- return segments.device_begin(id);
- }
- auto device_end(device_id_t id) {
- return segments.device_end(id);
- }
-
// the following methods are used for keeping track of
// seastore disk space usage
void segment_opened(segment_id_t segment) {
ceph_assert(segments[segment].is_empty());
// must be opening a new segment
auto [iter, inserted] = sm_info->open_segment_avails.emplace(
- segment, sm_info->segment_size);
+ segment, get_segment_size());
opened_segments++;
ceph_assert(inserted);
}
void segment_emptied(segment_id_t segment) {
auto& sm_info = sm_infos[segment.device_id()];
sm_info->empty_segments++;
- sm_info->avail_bytes += sm_info->segment_size;
- avail_bytes += sm_info->segment_size;
+ sm_info->avail_bytes += get_segment_size();
+ avail_bytes += get_segment_size();
}
void segment_closed(segment_id_t segment) {
assert(segments.contains(segment));
opened_segments--;
} else {
ceph_assert(segment_info.is_empty());
- assert(sm_info->avail_bytes >= (size_t)sm_info->segment_size);
- assert(avail_bytes >= (size_t)sm_info->segment_size);
+ assert(sm_info->avail_bytes >= (std::size_t)get_segment_size());
+ assert(avail_bytes >= (std::size_t)get_segment_size());
assert(sm_info->empty_segments > 0);
- sm_info->avail_bytes -= sm_info->segment_size;
- avail_bytes -= sm_info->segment_size;
+ sm_info->avail_bytes -= get_segment_size();
+ avail_bytes -= get_segment_size();
sm_info->empty_segments--;
}
segment_info.set_closed();
offset);
return;
}
- auto new_avail_bytes = sm_info->segment_size - offset.as_seg_paddr().get_segment_off();
+ auto new_avail_bytes = get_segment_size() -
+ offset.as_seg_paddr().get_segment_off();
if (iter->second < new_avail_bytes) {
crimson::get_logger(ceph_subsys_seastore_cleaner).error(
"SegmentCleaner::update_segment_avail_bytes:"
}
return num;
}
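+  // asserted in add_segment_manager() to be identical across all devices in the group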
+ seastore_off_t get_segment_size() const {
+ assert(segment_size != 0);
+ return segment_size;
+ }
+
private:
std::vector<std::optional<segment_manager_info_t>> sm_infos;
segment_map_t<segment_info_t> segments;
size_t total_bytes = 0;
size_t avail_bytes = 0;
size_t opened_segments = 0;
+ seastore_off_t segment_size = 0;
friend class SegmentCleaner;
};
class SegmentProvider {
public:
virtual segment_id_t get_segment(
- device_id_t id, segment_seq_t seq, segment_type_t type) = 0;
+ segment_seq_t seq, segment_type_t type) = 0;
virtual void close_segment(segment_id_t) {}
virtual void update_segment_avail_bytes(paddr_t offset) = 0;
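+  // expose the device group so segment allocators no longer need a direct SegmentManager reference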
+ virtual SegmentManagerGroup* get_segment_manager_group() = 0;
+
virtual ~SegmentProvider() {}
};
/// head of journal
journal_seq_t journal_head;
- device_id_t journal_device_id;
-
ExtentCallbackInterface *ecb = nullptr;
/// populated if there is an IO blocked on hard limits
using mount_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
using mount_ret = mount_ertr::future<>;
- mount_ret mount(device_id_t pdevice_id);
+ mount_ret mount();
segment_id_t get_segment(
- device_id_t id, segment_seq_t seq, segment_type_t type) final;
+ segment_seq_t seq, segment_type_t type) final;
void close_segment(segment_id_t segment) final;
return segments[id].get_type();
}
+ SegmentManagerGroup* get_segment_manager_group() final {
+ return sm_group.get();
+ }
+
using release_ertr = SegmentManagerGroup::release_ertr;
release_ertr::future<> maybe_release_segment(Transaction &t);
}
size_t get_bytes_available_current_segment() const {
- auto& seg_addr = journal_head.offset.as_seg_paddr();
- auto segment_size =
- segments[seg_addr.get_segment_id().device_id()]->segment_size;
+ auto segment_size = segments.get_segment_size();
return segment_size - get_bytes_used_current_segment();
}
if (journal_head == JOURNAL_SEQ_NULL) {
    // this is for calculating journal bytes during the journal
    // replay phase, in which journal_head is not yet set
- return segments.get_journal_segments() * segments[journal_device_id]->segment_size;
+ return segments.get_journal_segments() * segments.get_segment_size();
} else {
assert(journal_head >= journal_tail_committed);
- auto& seg_addr = journal_head.offset.as_seg_paddr();
- auto segment_size =
- segments[seg_addr.get_segment_id().device_id()]->segment_size;
+ auto segment_size = segments.get_segment_size();
return (journal_head.segment_seq - journal_tail_committed.segment_seq + 1) *
segment_size;
}
assert(s_type != segment_type_t::NULL_SEG);
segments[segment].type = s_type;
if (s_type == segment_type_t::JOURNAL) {
- assert(journal_device_id == segment.device_id());
segments.new_journal_segment();
} else {
assert(s_type == segment_type_t::OOL);
ceph_assert(get_size() % get_segment_size() == 0);
return ((device_segment_id_t)(get_size() / get_segment_size()));
}
- seastore_off_t get_rounded_tail_length() const {
- return p2roundup(
- ceph::encoded_sizeof_bounded<segment_tail_t>(),
- (size_t)get_block_size());
- }
virtual ~SegmentManager() {}
return segment_manager.read(
paddr_t::make_seg_paddr(
segment,
- segment_manager.get_segment_size() -
- segment_manager.get_rounded_tail_length()),
- segment_manager.get_rounded_tail_length()
+ segment_manager.get_segment_size() - get_rounded_tail_length()),
+ get_rounded_tail_length()
).handle_error(
read_segment_header_ertr::pass_further{},
crimson::ct_error::assert_all{
auto& segment_manager = *segment_managers[segment.device_id()];
return segment_manager.read(
paddr_t::make_seg_paddr(segment, 0),
- segment_manager.get_block_size()
+ get_rounded_header_length()
).handle_error(
read_segment_header_ertr::pass_further{},
crimson::ct_error::assert_all{
});
}
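+// scan every segment on every device in the group and collect the headers of JOURNAL segments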
+SegmentManagerGroup::find_journal_segment_headers_ret
+SegmentManagerGroup::find_journal_segment_headers()
+{
+ return seastar::do_with(
+ get_segment_managers(),
+ find_journal_segment_headers_ret_bare{},
+ [this](auto &sms, auto& ret) -> find_journal_segment_headers_ret
+ {
+ return crimson::do_for_each(sms,
+ [this, &ret](SegmentManager *sm)
+ {
+ LOG_PREFIX(SegmentManagerGroup::find_journal_segment_headers);
+ auto device_id = sm->get_device_id();
+ auto num_segments = sm->get_num_segments();
+ INFO("processing {} with {} segments",
+ device_id_printer_t{device_id}, num_segments);
+ return crimson::do_for_each(
+ boost::counting_iterator<device_segment_id_t>(0),
+ boost::counting_iterator<device_segment_id_t>(num_segments),
+ [this, &ret, device_id](device_segment_id_t d_segment_id)
+ {
+ segment_id_t segment_id{device_id, d_segment_id};
+ return read_segment_header(segment_id
+ ).safe_then([segment_id, &ret](auto &&header) {
+ if (header.get_type() == segment_type_t::JOURNAL) {
+ ret.emplace_back(std::make_pair(segment_id, std::move(header)));
+ }
+ }).handle_error(
+ crimson::ct_error::enoent::handle([](auto) {
+ return find_journal_segment_headers_ertr::now();
+ }),
+ crimson::ct_error::enodata::handle([](auto) {
+ return find_journal_segment_headers_ertr::now();
+ }),
+ crimson::ct_error::input_output_error::pass_further{}
+ );
+ });
+ }).safe_then([&ret]() mutable {
+ return find_journal_segment_headers_ret{
+ find_journal_segment_headers_ertr::ready_future_marker{},
+ std::move(ret)};
+ });
+ });
+}
+
} // namespace crimson::os::seastore
device_ids.clear();
}
+ /**
+ * get device info
+ *
+   * Assume all segment managers in the group share the same values for the following information.
+ */
+ seastore_off_t get_block_size() const {
+ assert(device_ids.size());
+ return segment_managers[*device_ids.begin()]->get_block_size();
+ }
+
+ seastore_off_t get_segment_size() const {
+ assert(device_ids.size());
+ return segment_managers[*device_ids.begin()]->get_segment_size();
+ }
+
+ const seastore_meta_t &get_meta() const {
+ assert(device_ids.size());
+ return segment_managers[*device_ids.begin()]->get_meta();
+ }
+
+ std::size_t get_rounded_header_length() const {
+ return p2roundup(
+ ceph::encoded_sizeof_bounded<segment_header_t>(),
+ (std::size_t)get_block_size());
+ }
+
+ std::size_t get_rounded_tail_length() const {
+ return p2roundup(
+ ceph::encoded_sizeof_bounded<segment_tail_t>(),
+ (std::size_t)get_block_size());
+ }
+
using read_segment_header_ertr = crimson::errorator<
crimson::ct_error::enoent,
crimson::ct_error::enodata,
found_record_handler_t &handler ///< [in] handler for records
); ///< @return used budget
+ /*
+ * read journal segment headers
+ */
+ using find_journal_segment_headers_ertr = crimson::errorator<
+ crimson::ct_error::input_output_error>;
+ using find_journal_segment_headers_ret_bare = std::vector<
+ std::pair<segment_id_t, segment_header_t>>;
+ using find_journal_segment_headers_ret = find_journal_segment_headers_ertr::future<
+ find_journal_segment_headers_ret_bare>;
+ find_journal_segment_headers_ret find_journal_segment_headers();
+
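+  /// open a segment via the SegmentManager that owns it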
+ using open_ertr = SegmentManager::open_ertr;
+ open_ertr::future<SegmentRef> open(segment_id_t id) {
+ assert(has_device(id.device_id()));
+ return segment_managers[id.device_id()]->open(id);
+ }
+
using release_ertr = SegmentManager::release_ertr;
release_ertr::future<> release_segment(segment_id_t id) {
assert(has_device(id.device_id()));
LOG_PREFIX(TransactionManager::mkfs);
INFO("enter");
return segment_cleaner->mount(
- epm->get_primary_device().get_device_id()
).safe_then([this] {
return journal->open_for_write();
}).safe_then([this](auto addr) {
INFO("enter");
cache->init();
return segment_cleaner->mount(
- epm->get_primary_device().get_device_id()
).safe_then([this] {
return journal->replay(
[this](const auto &offsets, const auto &e, auto last_modified) {
cache->dump_contents();
return journal->close();
}).safe_then([this] {
- sms.reset();
return epm->close();
- }).safe_then([FNAME] {
+ }).safe_then([FNAME, this] {
INFO("completed");
+ sms.reset();
return seastar::now();
});
}
TransactionManager::~TransactionManager() {}
-TransactionManagerRef make_transaction_manager(
- Device &device,
- bool detailed)
+TransactionManagerRef make_transaction_manager(bool detailed)
{
auto sms = std::make_unique<SegmentManagerGroup>();
auto& sms_ref = *sms.get();
SegmentCleaner::config_t::get_default(),
std::move(sms),
detailed);
- ceph_assert(device.get_device_type() == device_type_t::SEGMENTED);
- auto sm = dynamic_cast<SegmentManager*>(&device);
- ceph_assert(sm != nullptr);
- auto journal = journal::make_segmented(*sm, sms_ref, *segment_cleaner);
+ auto journal = journal::make_segmented(sms_ref, *segment_cleaner);
auto epm = std::make_unique<ExtentPlacementManager>();
auto cache = std::make_unique<Cache>(*epm);
auto lba_manager = lba_manager::create_lba_manager(*cache);
SUBDEBUG(seastore_tm, "adding device {}, is_primary={}",
dev->get_device_id(), is_primary);
epm->add_device(dev, is_primary);
+ epm->add_allocator(
+ dev->get_device_type(),
+ std::make_unique<SegmentedAllocator>(
+ *segment_cleaner,
+ segment_cleaner->get_ool_segment_seq_allocator()));
ceph_assert(dev->get_device_type() == device_type_t::SEGMENTED);
auto sm = dynamic_cast<SegmentManager*>(dev);
ceph_assert(sm != nullptr);
- epm->add_allocator(
- dev->get_device_type(),
- std::make_unique<SegmentedAllocator>(
- *segment_cleaner,
- *sm,
- segment_cleaner->get_ool_segment_seq_allocator()));
sms.add_segment_manager(sm);
}
};
using TransactionManagerRef = std::unique_ptr<TransactionManager>;
-TransactionManagerRef make_transaction_manager(
- Device &device,
- bool detailed);
+TransactionManagerRef make_transaction_manager(bool detailed);
}
void TMDriver::init()
{
- tm = make_transaction_manager(*device, false /* detailed */);
+ tm = make_transaction_manager(false /* detailed */);
tm->add_device(device.get(), true);
}
segment_id_t next;
- btree_test_base() = default;
-
std::map<segment_id_t, segment_seq_t> segment_seqs;
std::map<segment_id_t, segment_type_t> segment_types;
+ btree_test_base() = default;
seastar::lowres_system_clock::time_point get_last_modified(
segment_id_t id) const final {
void update_segment_avail_bytes(paddr_t offset) final {}
segment_id_t get_segment(
- device_id_t id,
segment_seq_t seq,
- segment_type_t type) final
- {
+ segment_type_t type
+ ) final {
auto ret = next;
next = segment_id_t{
- next.device_id(),
+ segment_manager->get_device_id(),
next.device_segment_id() + 1};
segment_seqs[ret] = seq;
segment_types[ret] = type;
return ret;
}
- segment_seq_t get_seq(segment_id_t id) {
+ journal_seq_t get_journal_tail_target() const final { return journal_seq_t{}; }
+
+ void update_journal_tail_committed(journal_seq_t committed) final {}
+
+ SegmentManagerGroup* get_segment_manager_group() final { return sms.get(); }
+
+ segment_seq_t get_seq(segment_id_t id) final {
return segment_seqs[id];
}
- segment_type_t get_type(segment_id_t id) {
+ segment_type_t get_type(segment_id_t id) final {
return segment_types[id];
}
- journal_seq_t get_journal_tail_target() const final { return journal_seq_t{}; }
- void update_journal_tail_committed(journal_seq_t committed) final {}
-
virtual void complete_commit(Transaction &t) {}
seastar::future<> submit_transaction(TransactionRef t)
{
segment_manager = segment_manager::create_test_ephemeral();
sms.reset(new SegmentManagerGroup());
auto& sms_ref = *sms.get();
- journal = journal::make_segmented(
- *segment_manager, sms_ref, *this);
+ journal = journal::make_segmented(sms_ref, *this);
epm.reset(new ExtentPlacementManager());
cache.reset(new Cache(*epm));
void update_segment_avail_bytes(paddr_t offset) final {}
segment_id_t get_segment(
- device_id_t id,
segment_seq_t seq,
- segment_type_t type) final
- {
+ segment_type_t type
+ ) final {
auto ret = next;
next = segment_id_t{
- next.device_id(),
+ segment_manager->get_device_id(),
next.device_segment_id() + 1};
segment_seqs[ret] = seq;
segment_types[ret] = type;
return ret;
}
- segment_seq_t get_seq(segment_id_t id) {
+ journal_seq_t get_journal_tail_target() const final { return journal_seq_t{}; }
+
+ void update_journal_tail_committed(journal_seq_t paddr) final {}
+
+ SegmentManagerGroup* get_segment_manager_group() final { return sms.get(); }
+
+ segment_seq_t get_seq(segment_id_t id) final {
return segment_seqs[id];
}
return segment_types[id];
}
- journal_seq_t get_journal_tail_target() const final { return journal_seq_t{}; }
- void update_journal_tail_committed(journal_seq_t paddr) final {}
-
seastar::future<> set_up_fut() final {
segment_manager = segment_manager::create_test_ephemeral();
block_size = segment_manager->get_block_size();
sms.reset(new SegmentManagerGroup());
next = segment_id_t(segment_manager->get_device_id(), 0);
- journal = journal::make_segmented(*segment_manager, *sms, *this);
+ journal = journal::make_segmented(*sms, *this);
journal->set_write_pipeline(&pipeline);
sms->add_segment_manager(segment_manager.get());
return segment_manager->init(
return journal->close(
).safe_then([this, f=std::move(f)]() mutable {
journal = journal::make_segmented(
- *segment_manager, *sms, *this);
+ *sms, *this);
journal->set_write_pipeline(&pipeline);
return journal->replay(std::forward<T>(std::move(f)));
}).safe_then([this] {
};
auto get_seastore(SeaStore::MDStoreRef mdstore, SegmentManagerRef sm) {
- auto tm = make_transaction_manager(*sm, true);
+ auto tm = make_transaction_manager(true);
auto cm = std::make_unique<collection_manager::FlatCollectionManager>(*tm);
return std::make_unique<SeaStore>(
"",
TMTestState() : EphemeralTestState() {}
virtual void _init() override {
- tm = make_transaction_manager(*segment_manager, true);
+ tm = make_transaction_manager(true);
tm->add_device(segment_manager.get(), true);
segment_cleaner = tm->get_segment_cleaner();
lba_manager = tm->get_lba_manager();