if (offset + length > get_journal_end()) {
return crimson::ct_error::erange::make();
}
- bl.rebuild_aligned(get_block_size());
DEBUG(
"overwrite in CircularBoundedJournal, offset {}, length {}",
offset,
length);
- auto write_length = length < get_block_size() ? get_block_size() : length;
- auto bptr = bufferptr(ceph::buffer::create_page_aligned(write_length));
- auto iter = bl.cbegin();
- iter.copy(bl.length(), bptr.c_str());
- return device->write(offset, bptr
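+ // hand the bufferlist to the device as-is; writev aligns it and splits it into iovec batches internally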
+ return device->writev(offset, bl
).handle_error(
write_ertr::pass_further{},
- crimson::ct_error::assert_all{ "Invalid error device->write" }
+ crimson::ct_error::assert_all{ "Invalid error device->writev" }
#include <fcntl.h>
#include "crimson/common/log.h"
+#include "crimson/common/errorator-loop.h"
#include "include/buffer.h"
#include "nvmedevice.h"
});
}
+write_ertr::future<> PosixNVMeDevice::writev(
+ uint64_t offset,
+ ceph::bufferlist bl,
+ uint16_t stream) {
+ logger().debug(
+ "block: write offset {} len {}",
+ offset,
+ bl.length());
+
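+ // fall back to the default stream when the requested stream id is not backed by the device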
+ uint16_t supported_stream = stream;
+ if (stream >= stream_id_count) {
+ supported_stream = WRITE_LIFE_NOT_SET;
+ }
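+ // dma_write needs aligned memory; rebuild the bufferlist so every segment is block_size aligned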
+ bl.rebuild_aligned(block_size);
+
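+ // prepare_iovs() yields iovec batches pointing into bl's buffers, so keep both alive via do_with until the writes finish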
+ return seastar::do_with(
+ bl.prepare_iovs(),
+ std::move(bl),
+ [this, supported_stream, offset](auto& iovs, auto& bl)
+ {
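+ // submit every batch concurrently; each batch carries its byte offset and length relative to the start of bl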
+ return write_ertr::parallel_for_each(
+ iovs,
+ [this, supported_stream, offset](auto& p) mutable
+ {
+ auto off = offset + p.offset;
+ auto len = p.length;
+ auto& iov = p.iov;
+ return io_device[supported_stream].dma_write(off, std::move(iov)
+ ).handle_exception(
+ [this, off, len](auto e) -> write_ertr::future<size_t>
+ {
+ logger().error("D{} poffset={}~{} dma_write got error -- {}",
+ get_device_id(), off, len, e);
+ return crimson::ct_error::input_output_error::make();
+ }).then([this, off, len](size_t written) -> write_ertr::future<> {
+ if (written != len) {
+ logger().error("D{} poffset={}~{} dma_write len={} inconsistent",
+ get_device_id(), off, len, written);
+ return crimson::ct_error::input_output_error::make();
+ }
+ return write_ertr::now();
+ });
+ });
+ });
+}
+
Device::close_ertr::future<> PosixNVMeDevice::close() {
logger().debug(" close ");
return device.close().then([this]() {
logger().debug(" close ");
return close_ertr::now();
  });
}
+
+write_ertr::future<> TestMemory::writev(
+ uint64_t offset,
+ ceph::bufferlist bl,
+ uint16_t stream) {
+ ceph_assert(buf);
+ logger().debug(
+ "TestMemory: write offset {} len {}",
+ offset,
+ bl.length());
+
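+ // TestMemory keeps the whole device in one in-memory buffer, so a vectored write is just a copy into it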
+ bl.begin().copy(bl.length(), buf + offset);
+ return write_ertr::now();
+}
+
}
virtual open_ertr::future<> open(
const std::string& path,
seastar::open_flags mode) = 0;
- //virtual seastar::future<> close() = 0;
+
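+ // write a full bufferlist at the given offset without first flattening it into one contiguous buffer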
+ virtual write_ertr::future<> writev(
+ uint64_t offset,
+ ceph::bufferlist bl,
+ uint16_t stream = 0) = 0;
/*
 * For passing through NVMe IO or admin commands to the SSD
return mount_ertr::now();
}
+ write_ertr::future<> writev(
+ uint64_t offset,
+ ceph::bufferlist bl,
+ uint16_t stream = 0) final;
+
nvme_command_ertr::future<int> pass_admin(
nvme_admin_command_t& admin_cmd) override;
nvme_command_ertr::future<int> pass_through_io(
close_ertr::future<> close() override;
+ write_ertr::future<> writev(
+ uint64_t offset,
+ ceph::bufferlist bl,
+ uint16_t stream = 0) final;
+
char *buf;
size_t size;
};