);
}
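+// Open the underlying segment allocator up front; the value returned by
+// open() is not needed here, so it is discarded.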
+SegmentedAllocator::Writer::open_ertr::future<>
+SegmentedAllocator::Writer::open()
+{
+ return segment_allocator.open().discard_result();
+}
+
SegmentedAllocator::Writer::write_iertr::future<>
SegmentedAllocator::Writer::_write(
Transaction& t,
return write_iertr::now();
}
return seastar::with_gate(write_guard, [this, &t, &extents] {
- if (!roll_promise.has_value() &&
- !segment_allocator.can_write()) {
- roll_promise = seastar::shared_promise<>();
- return trans_intr::make_interruptible(
- segment_allocator.open().discard_result()
- ).finally([this] {
- roll_promise->set_value();
- roll_promise.reset();
- }).si_then([this, &t, &extents] {
- return do_write(t, extents);
- });
- }
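+    // The segment allocator is now opened via open() rather than lazily here.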
return do_write(t, extents);
});
}
 * Interface through which the final write to an ool segment is performed.
*/
class ExtentOolWriter {
+  using base_ertr = crimson::errorator<
+    crimson::ct_error::input_output_error>; // media error or corruption
public:
- using write_iertr = trans_iertr<crimson::errorator<
- crimson::ct_error::input_output_error, // media error or corruption
- crimson::ct_error::invarg, // if offset is < write pointer or misaligned
- crimson::ct_error::ebadf, // segment closed
- crimson::ct_error::enospc // write exceeds segment size
- >>;
-
- using stop_ertr = Segment::close_ertr;
- virtual stop_ertr::future<> stop() = 0;
+ virtual ~ExtentOolWriter() {}
+
+ using open_ertr = base_ertr;
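+  // Open the underlying storage target before any ool extents are written.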
+ virtual open_ertr::future<> open() = 0;
+
+ using write_iertr = trans_iertr<base_ertr>;
virtual write_iertr::future<> write(
Transaction& t,
std::list<LogicalCachedExtentRef>& extent) = 0;
- virtual ~ExtentOolWriter() {}
+
+ using stop_ertr = base_ertr;
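+  // Stop the writer and close its underlying storage target.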
+ virtual stop_ertr::future<> stop() = 0;
};
/**
*/
class ExtentAllocator {
public:
- using alloc_paddr_iertr = trans_iertr<crimson::errorator<
- crimson::ct_error::input_output_error, // media error or corruption
- crimson::ct_error::invarg, // if offset is < write pointer or misaligned
- crimson::ct_error::ebadf, // segment closed
- crimson::ct_error::enospc // write exceeds segment size
- >>;
+ using open_ertr = ExtentOolWriter::open_ertr;
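+  // Open the allocator before any ool extents are allocated and written.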
+ virtual open_ertr::future<> open() = 0;
+ using alloc_paddr_iertr = ExtentOolWriter::write_iertr;
virtual alloc_paddr_iertr::future<> alloc_ool_extents_paddr(
Transaction& t,
std::list<LogicalCachedExtentRef>&) = 0;
Writer(Writer &&) = default;
+ open_ertr::future<> open() final;
+
write_iertr::future<> write(
Transaction& t,
std::list<LogicalCachedExtentRef>& extent) final;
stop_ertr::future<> stop() final {
return write_guard.close().then([this] {
return segment_allocator.close();
+ }).safe_then([this] {
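+      // Reset the gate so the writer can accept new operations if it is
+      // opened again.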
+ write_guard = seastar::gate();
});
}
}
}
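+  // Open all backing writers.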
+ open_ertr::future<> open() {
+ return crimson::do_for_each(writers, [](auto& writer) {
+ return writer.open();
+ });
+ }
+
alloc_paddr_iertr::future<> alloc_ool_extents_paddr(
Transaction& t,
std::list<LogicalCachedExtentRef>& extents) final {
public:
ExtentPlacementManager() = default;
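+  // Register an allocator for ool extents on the given device type;
+  // a device type may have more than one allocator.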
+ void add_allocator(device_type_t type, ExtentAllocatorRef&& allocator) {
+ allocators[type].emplace_back(std::move(allocator));
+ LOG_PREFIX(ExtentPlacementManager::add_allocator);
+ SUBDEBUG(seastore_tm, "allocators for {}: {}",
+ type,
+ allocators[type].size());
+ }
+
+ using open_ertr = ExtentOolWriter::open_ertr;
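+  // Open all registered allocators of every device type.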
+ open_ertr::future<> open() {
+ LOG_PREFIX(ExtentPlacementManager::open);
+ SUBINFO(seastore_tm, "started");
+ return crimson::do_for_each(allocators, [](auto& allocators_item) {
+ return crimson::do_for_each(allocators_item.second, [](auto& allocator) {
+ return allocator->open();
+ });
+ });
+ }
+
struct alloc_result_t {
paddr_t paddr;
bufferptr bp;
});
}
- void add_allocator(device_type_t type, ExtentAllocatorRef&& allocator) {
- allocators[type].emplace_back(std::move(allocator));
- LOG_PREFIX(ExtentPlacementManager::add_allocator);
- SUBDEBUG(seastore_tm, "allocators for {}: {}",
- type,
- allocators[type].size());
+ using close_ertr = ExtentOolWriter::stop_ertr;
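+  // Stop all registered allocators of every device type.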
+ close_ertr::future<> close() {
+ LOG_PREFIX(ExtentPlacementManager::close);
+ SUBINFO(seastore_tm, "started");
+ return crimson::do_for_each(allocators, [](auto& allocators_item) {
+ return crimson::do_for_each(allocators_item.second, [](auto& allocator) {
+ return allocator->stop();
+ });
+ });
}
private: