}
JournalRef make_circularbounded(
- crimson::os::seastore::nvme_device::RBMDevice* device,
+ crimson::os::seastore::random_block_device::RBMDevice* device,
std::string path)
{
return std::make_unique<CircularBoundedJournal>(device, path);
namespace crimson::os::seastore {
-namespace nvme_device {
+namespace random_block_device {
class RBMDevice;
}
JournalRef make_segmented(SegmentProvider &provider);
JournalRef make_circularbounded(
- crimson::os::seastore::nvme_device::RBMDevice* device,
+ crimson::os::seastore::random_block_device::RBMDevice* device,
std::string path);
}
constexpr rbm_abs_addr CBJOURNAL_START_ADDRESS = 0;
constexpr uint64_t CBJOURNAL_MAGIC = 0xCCCC;
-using RBMDevice = nvme_device::RBMDevice;
+using RBMDevice = random_block_device::RBMDevice;
/**
* CircularBoundedJournal
constexpr uint32_t RBM_SUPERBLOCK_SIZE = 4096;
-using RBMDevice = nvme_device::RBMDevice;
+using RBMDevice = random_block_device::RBMDevice;
using RBMDeviceRef = std::unique_ptr<RBMDevice>;
enum {
#include "include/buffer.h"
#include "rbm_device.h"
+#include "nvme_block_device.h"
namespace {
seastar::logger& logger() {
}
}
-namespace crimson::os::seastore::nvme_device {
+namespace crimson::os::seastore::random_block_device::nvme {
open_ertr::future<> NVMeBlockDevice::open(
const std::string &in_path,
return device.ioctl(NVME_IOCTL_IO_CMD, &io_cmd);
}
+}
+
+namespace crimson::os::seastore::random_block_device {
+
open_ertr::future<> TestMemory::open(
const std::string &in_path,
seastar::open_flags mode) {
--- /dev/null
+//-*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#include <list>
+#include <memory>
+#include <vector>
+
+#include <seastar/core/file.hh>
+#include <linux/nvme_ioctl.h>
+
+#include "crimson/osd/exceptions.h"
+#include "crimson/common/layout.h"
+#include "rbm_device.h"
+
+namespace ceph {
+ namespace buffer {
+ class bufferptr;
+ }
+}
+
+namespace crimson::os::seastore::random_block_device::nvme {
+/*
+ * NVMe protocol structures (nvme_XX, identify_XX)
+ *
+ * All structures related to the NVMe protocol follow NVMe protocol v1.4
+ * (latest). NVMe is a protocol for fast interfacing between the host and an
+ * SSD device. We selectively adopted features among the various NVMe features
+ * to ease implementation. NVMeBlockDevice also provides generic command
+ * submission APIs for IO and Admin commands; please use pass_through_io() and
+ * pass_admin() for that.
+ *
+ * For more information about the NVMe protocol, refer to
+ * https://nvmexpress.org/
+ */
+struct nvme_identify_command_t {
+ uint32_t common_dw[10];
+
+ uint32_t cns : 8;
+ uint32_t reserved : 8;
+ uint32_t cnt_id : 16;
+
+ static const uint8_t CNS_NAMESPACE = 0x00;
+ static const uint8_t CNS_CONTROLLER = 0x01;
+};
+
+struct nvme_admin_command_t {
+ union {
+ nvme_passthru_cmd common;
+ nvme_identify_command_t identify;
+ };
+
+ static const uint8_t OPCODE_IDENTIFY = 0x06;
+};
+
+// Optional Admin Command Support (OACS)
+// Indicates optional commands are supported by SSD or not
+struct oacs_t {
+ uint16_t unused : 5;
+ uint16_t support_directives : 1; // Support multi-stream
+ uint16_t unused2 : 10;
+};
+
+struct nvme_identify_controller_data_t {
+ union {
+ struct {
+ uint8_t unused[256]; // [255:0]
+ oacs_t oacs; // [257:256]
+ uint8_t unused2[270]; // [527:258]
+ uint16_t awupf; // [529:528]
+ };
+ uint8_t raw[4096];
+ };
+};
+
+// End-to-end Data Protection Capabilities (DPC)
+// Indicates type of E2E data protection supported by SSD
+struct dpc_t {
+ uint8_t support_type1 : 1;
+ uint8_t support_type2 : 1;
+ uint8_t support_type3 : 1;
+ uint8_t support_first_meta : 1;
+ uint8_t support_last_meta : 1;
+ uint8_t reserved : 3;
+};
+
+// End-to-end Data Protection Type Settings (DPS)
+// Indicates enabled type of E2E data protection
+struct dps_t {
+ uint8_t protection_type : 3;
+ uint8_t protection_info : 1;
+ uint8_t reserved : 4;
+};
+
+// Namespace Features (NSFEAT)
+// Indicates features of namespace
+struct nsfeat_t {
+ uint8_t thinp : 1;
+ uint8_t nsabp : 1;
+ uint8_t dae : 1;
+ uint8_t uid_reuse : 1;
+ uint8_t opterf : 1; // Support NPWG, NPWA
+ uint8_t reserved : 3;
+};
+
+// LBA Format (LBAF)
+// Indicates LBA format (metadata size, data size, performance)
+struct lbaf_t {
+ uint32_t ms : 16;
+ uint32_t lbads : 8;
+ uint32_t rp : 2;
+ uint32_t reserved : 6;
+};
+
+struct nvme_identify_namespace_data_t {
+ union {
+ struct {
+ uint8_t unused[24]; // [23:0]
+ nsfeat_t nsfeat; // [24]
+ uint8_t unused2[3]; // [27:25]
+ dpc_t dpc; // [28]
+ dps_t dps; // [29]
+ uint8_t unused3[34]; // [63:30]
+ uint16_t npwg; // [65:64]
+ uint16_t npwa; // [67:66]
+ uint8_t unused4[60]; // [127:68]
+ lbaf_t lbaf0; // [131:128]
+ };
+ uint8_t raw[4096];
+ };
+};
+
+struct nvme_rw_command_t {
+ uint32_t common_dw[10];
+
+ uint64_t s_lba;
+
+ uint32_t nlb : 16; // 0's based value
+ uint32_t reserved : 4;
+ uint32_t d_type : 4;
+ uint32_t reserved2 : 2;
+ uint32_t prinfo_prchk : 3;
+ uint32_t prinfo_pract : 1;
+ uint32_t fua : 1;
+ uint32_t lr : 1;
+
+ uint32_t reserved3 : 16;
+ uint32_t dspec : 16;
+
+ static const uint32_t DTYPE_STREAM = 1;
+};
+
+struct nvme_io_command_t {
+ union {
+ nvme_passthru_cmd common;
+ nvme_rw_command_t rw;
+ };
+ static const uint8_t OPCODE_WRITE = 0x01;
+  static const uint8_t OPCODE_READ = 0x02;
+};
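+
+// A minimal sketch of combining the structures above into a directive
+// (multi-stream) write for pass_through_io(); lba, nr_blocks and stream_id
+// are placeholder values, and data buffer setup (common.addr / data_len) is
+// elided:
+//
+//   nvme_io_command_t cmd{};
+//   cmd.common.opcode = nvme_io_command_t::OPCODE_WRITE;
+//   cmd.rw.s_lba = lba;
+//   cmd.rw.nlb = nr_blocks - 1;                      // 0's based value
+//   cmd.rw.d_type = nvme_rw_command_t::DTYPE_STREAM;
+//   cmd.rw.dspec = stream_id;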
+
+/*
+ * Implementation of NVMeBlockDevice with POSIX APIs
+ *
+ * NVMeBlockDevice provides NVMe SSD interfaces through POSIX APIs, which are
+ * generally available in most operating environments. A brief usage sketch
+ * follows the class definition.
+ */
+class NVMeBlockDevice : public RBMDevice {
+public:
+
+  /*
+   * Size information of the NVMe device
+   *
+   * size : total size of the device in bytes.
+   *
+   * block_size : IO unit size in bytes. The caller should align every IO
+   * command to the block size.
+   *
+   * preferred_write_granularity (PWG), preferred_write_alignment (PWA) : IO
+   * unit size for writes, in bytes. The caller should make every write IO a
+   * multiple of PWG in size and align its starting address to PWA. Available
+   * only if the device supports NVMe protocol 1.4 or later.
+   *
+   * atomic_write_unit : the maximum size of a write whose atomicity is
+   * guaranteed by the SSD even on power failure. A write equal to or smaller
+   * than atomic_write_unit does not require fsync().
+   */
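+  //
+  // A minimal sketch of honoring these hints on the caller side, assuming the
+  // p2align()/p2roundup() helpers from include/intarith.h; offset and length
+  // are placeholder values:
+  //
+  //   uint64_t start = p2align(offset, get_preffered_write_alignment());
+  //   uint64_t len = p2roundup(length, get_preffered_write_granularity());
+  //   bool needs_flush = (len > get_atomic_write_unit());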
+
+ NVMeBlockDevice() {}
+ ~NVMeBlockDevice() = default;
+
+ open_ertr::future<> open(
+ const std::string &in_path,
+ seastar::open_flags mode) override;
+
+ write_ertr::future<> write(
+ uint64_t offset,
+ bufferptr &bptr,
+ uint16_t stream = 0) override;
+
+ using RBMDevice::read;
+ read_ertr::future<> read(
+ uint64_t offset,
+ bufferptr &bptr) final;
+
+ close_ertr::future<> close() override;
+
+ discard_ertr::future<> discard(
+ uint64_t offset,
+ uint64_t len) override;
+
+ mkfs_ret mkfs(device_config_t) final {
+ return mkfs_ertr::now();
+ }
+
+ mount_ret mount() final {
+ return mount_ertr::now();
+ }
+
+ write_ertr::future<> writev(
+ uint64_t offset,
+ ceph::bufferlist bl,
+ uint16_t stream = 0) final;
+
+ uint64_t get_preffered_write_granularity() const { return write_granularity; }
+ uint64_t get_preffered_write_alignment() const { return write_alignment; }
+ uint64_t get_atomic_write_unit() const { return atomic_write_unit; }
+  /*
+   * End-to-End Data Protection
+   *
+   * The NVMe device tracks data integrity in a way similar to a checksum.
+   * Clients can offload checksumming to the NVMe device to reduce CPU
+   * utilization. If data protection is enabled, a checksum is calculated on
+   * every write and used to verify the data on every read.
+   */
+ bool is_data_protection_enabled() const { return data_protection_enabled; }
+
+  /*
+   * Data Health
+   *
+   * Returns a list of LBAs whose data is about to become corrupted. The
+   * caller can overwrite, unmap, or refresh the data to protect it.
+   */
+ virtual nvme_command_ertr::future<std::list<uint64_t>> get_data_health() {
+ std::list<uint64_t> fragile_lbas;
+ return nvme_command_ertr::future<std::list<uint64_t>>(
+ nvme_command_ertr::ready_future_marker{},
+ fragile_lbas
+ );
+ }
+
+  /*
+   * Recovery Level
+   *
+   * Regulates the magnitude of SSD-internal data recovery. The caller can get
+   * better read latency with a lower magnitude.
+   */
+ virtual nvme_command_ertr::future<> set_data_recovery_level(
+ uint32_t level) { return nvme_command_ertr::now(); }
+  /*
+   * For passing through NVMe IO or Admin commands to the SSD
+   * The caller can construct and execute its own NVMe command; see the sketch
+   * below the declarations.
+   */
+ nvme_command_ertr::future<int> pass_admin(
+ nvme_admin_command_t& admin_cmd);
+ nvme_command_ertr::future<int> pass_through_io(
+ nvme_io_command_t& io_cmd);
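+
+  // A minimal sketch of a caller-constructed Identify Controller command for
+  // pass_admin(); keeping the data buffer alive across the call (e.g. via
+  // seastar::do_with) is left to the caller:
+  //
+  //   nvme_admin_command_t cmd{};
+  //   nvme_identify_controller_data_t data{};
+  //   cmd.common.opcode = nvme_admin_command_t::OPCODE_IDENTIFY;
+  //   cmd.common.addr = reinterpret_cast<uint64_t>(&data);
+  //   cmd.common.data_len = sizeof(data);
+  //   cmd.identify.cns = nvme_identify_command_t::CNS_CONTROLLER;
+  //   // pass_admin(cmd), then inspect data.oacs / data.awupf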
+
+ bool support_multistream = false;
+ uint8_t data_protection_type = 0;
+
+  /*
+   * Predictable Latency
+   *
+   * The NVMe device can guarantee IO latency within a pre-defined time
+   * window. This functionality will be analyzed soon.
+   */
+
+private:
+ // identify_controller/namespace are used to get SSD internal information such
+ // as supported features, NPWG and NPWA
+ nvme_command_ertr::future<nvme_identify_controller_data_t> identify_controller();
+ nvme_command_ertr::future<nvme_identify_namespace_data_t> identify_namespace();
+ nvme_command_ertr::future<int> get_nsid();
+ open_ertr::future<> open_for_io(
+ const std::string& in_path,
+ seastar::open_flags mode);
+
+ seastar::file device;
+ std::vector<seastar::file> io_device;
+ uint32_t stream_index_to_open = WRITE_LIFE_NOT_SET;
+  uint32_t stream_id_count = 1; // streams are disabled by default.
+ uint32_t awupf = 0;
+
+ uint64_t write_granularity = 4096;
+ uint64_t write_alignment = 4096;
+ uint32_t atomic_write_unit = 4096;
+
+ bool data_protection_enabled = false;
+};
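+
+/*
+ * A minimal usage sketch (error handling elided; the device path and the
+ * block-size aligned bufferptr are placeholders supplied by the caller):
+ *
+ *   NVMeBlockDevice nvme;
+ *   nvme.open("/dev/nvme0n1", seastar::open_flags::rw).safe_then([&] {
+ *     return nvme.write(0, bptr);
+ *   });
+ */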
+
+}
#pragma once
-#include <memory>
-#include <vector>
-#include <boost/intrusive_ptr.hpp>
-#include <boost/smart_ptr/intrusive_ref_counter.hpp>
-
-#include <seastar/core/file.hh>
-#include <seastar/core/future.hh>
-#include <seastar/core/reactor.hh>
-#include <linux/nvme_ioctl.h>
-#include <libaio.h>
-
-#include "crimson/osd/exceptions.h"
-#include "crimson/common/layout.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/os/seastore/random_block_manager.h"
#include "crimson/os/seastore/device.h"
}
}
-namespace crimson::os::seastore::nvme_device {
+namespace crimson::os::seastore::random_block_device {
// from blk/BlockDevice.h
#if defined(__linux__)
#define WRITE_LIFE_MAX 1
#endif
-/*
- * NVMe protocol structures (nvme_XX, identify_XX)
- *
- * All structures relative to NVMe protocol are following NVMe protocol v1.4
- * (latest). NVMe is protocol for fast interfacing between user and SSD device.
- * We selectively adopted features among various NVMe features to ease
- * implementation. And also, NVMeBlockDevice provides generic command submission
- * APIs for IO and Admin commands. Please use pass_through_io() and pass_admin()
- * to do it.
- *
- * For more information about NVMe protocol, refer https://nvmexpress.org/
- */
-struct nvme_identify_command_t {
- uint32_t common_dw[10];
-
- uint32_t cns : 8;
- uint32_t reserved : 8;
- uint32_t cnt_id : 16;
-
- static const uint8_t CNS_NAMESPACE = 0x00;
- static const uint8_t CNS_CONTROLLER = 0x01;
-};
-
-struct nvme_admin_command_t {
- union {
- nvme_passthru_cmd common;
- nvme_identify_command_t identify;
- };
-
- static const uint8_t OPCODE_IDENTIFY = 0x06;
-};
-
-// Optional Admin Command Support (OACS)
-// Indicates optional commands are supported by SSD or not
-struct oacs_t {
- uint16_t unused : 5;
- uint16_t support_directives : 1; // Support multi-stream
- uint16_t unused2 : 10;
-};
-
-struct nvme_identify_controller_data_t {
- union {
- struct {
- uint8_t unused[256]; // [255:0]
- oacs_t oacs; // [257:256]
- uint8_t unused2[270]; // [527:258]
- uint16_t awupf; // [529:528]
- };
- uint8_t raw[4096];
- };
-};
-
-// End-to-end Data Protection Capabilities (DPC)
-// Indicates type of E2E data protection supported by SSD
-struct dpc_t {
- uint8_t support_type1 : 1;
- uint8_t support_type2 : 1;
- uint8_t support_type3 : 1;
- uint8_t support_first_meta : 1;
- uint8_t support_last_meta : 1;
- uint8_t reserved : 3;
-};
-
-// End-to-end Data Protection Type Settings (DPS)
-// Indicates enabled type of E2E data protection
-struct dps_t {
- uint8_t protection_type : 3;
- uint8_t protection_info : 1;
- uint8_t reserved : 4;
-};
-
-// Namespace Features (NSFEAT)
-// Indicates features of namespace
-struct nsfeat_t {
- uint8_t thinp : 1;
- uint8_t nsabp : 1;
- uint8_t dae : 1;
- uint8_t uid_reuse : 1;
- uint8_t opterf : 1; // Support NPWG, NPWA
- uint8_t reserved : 3;
-};
-
-// LBA Format (LBAF)
-// Indicates LBA format (metadata size, data size, performance)
-struct lbaf_t {
- uint32_t ms : 16;
- uint32_t lbads : 8;
- uint32_t rp : 2;
- uint32_t reserved : 6;
-};
-
-struct nvme_identify_namespace_data_t {
- union {
- struct {
- uint8_t unused[24]; // [23:0]
- nsfeat_t nsfeat; // [24]
- uint8_t unused2[3]; // [27:25]
- dpc_t dpc; // [28]
- dps_t dps; // [29]
- uint8_t unused3[34]; // [63:30]
- uint16_t npwg; // [65:64]
- uint16_t npwa; // [67:66]
- uint8_t unused4[60]; // [127:68]
- lbaf_t lbaf0; // [131:128]
- };
- uint8_t raw[4096];
- };
-};
-
-struct nvme_rw_command_t {
- uint32_t common_dw[10];
-
- uint64_t s_lba;
-
- uint32_t nlb : 16; // 0's based value
- uint32_t reserved : 4;
- uint32_t d_type : 4;
- uint32_t reserved2 : 2;
- uint32_t prinfo_prchk : 3;
- uint32_t prinfo_pract : 1;
- uint32_t fua : 1;
- uint32_t lr : 1;
-
- uint32_t reserved3 : 16;
- uint32_t dspec : 16;
-
- static const uint32_t DTYPE_STREAM = 1;
-};
-
-struct nvme_io_command_t {
- union {
- nvme_passthru_cmd common;
- nvme_rw_command_t rw;
- };
- static const uint8_t OPCODE_WRITE = 0x01;
- static const uint8_t OPCODE_READ = 0x01;
-};
-
using read_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg,
using discard_ertr = crimson::errorator<
crimson::ct_error::input_output_error>;
-struct io_context_t {
- iocb cb;
- bool done = false;
-};
-
class RBMDevice : public Device {
public:
using Device::read;
bool is_data_protection_enabled() const { return false; }
};
-/*
- * Implementation of NVMeBlockDevice with POSIX APIs
- *
- * NVMeBlockDevice provides NVMe SSD interfaces through POSIX APIs which is
- * generally available at most operating environment.
- */
-class NVMeBlockDevice : public RBMDevice {
-public:
-
- /*
- * Service NVMe device relative size
- *
- * size : total size of device in byte.
- *
- * block_size : IO unit size in byte. Caller should follow every IO command
- * aligned with block size.
- *
- * preffered_write_granularity(PWG), preffered_write_alignment(PWA) : IO unit
- * size for write in byte. Caller should request every write IO sized multiple
- * times of PWG and aligned starting address by PWA. Available only if NVMe
- * Device supports NVMe protocol 1.4 or later versions.
- * atomic_write_unit : The maximum size of write whose atomicity is guranteed
- * by SSD even on power failure. The write equal to or smaller than
- * atomic_write_unit does not require fsync().
- */
-
- NVMeBlockDevice() {}
- ~NVMeBlockDevice() = default;
-
- open_ertr::future<> open(
- const std::string &in_path,
- seastar::open_flags mode) override;
-
- write_ertr::future<> write(
- uint64_t offset,
- bufferptr &bptr,
- uint16_t stream = 0) override;
-
- using RBMDevice::read;
- read_ertr::future<> read(
- uint64_t offset,
- bufferptr &bptr) final;
-
- close_ertr::future<> close() override;
-
- discard_ertr::future<> discard(
- uint64_t offset,
- uint64_t len) override;
-
- mkfs_ret mkfs(device_config_t) final {
- return mkfs_ertr::now();
- }
-
- mount_ret mount() final {
- return mount_ertr::now();
- }
-
- write_ertr::future<> writev(
- uint64_t offset,
- ceph::bufferlist bl,
- uint16_t stream = 0) final;
-
- uint64_t get_preffered_write_granularity() const { return write_granularity; }
- uint64_t get_preffered_write_alignment() const { return write_alignment; }
- uint64_t get_atomic_write_unit() const { return atomic_write_unit; }
- /*
- * End-to-End Data Protection
- *
- * NVMe device keeps track of data integrity similar with checksum. Client can
- * offload checksuming to NVMe device to reduce its CPU utilization. If data
- * protection is enabled, checksum is calculated on every write and used to
- * verify data on every read.
- */
- bool is_data_protection_enabled() const { return data_protection_enabled; }
-
- /*
- * Data Health
- *
- * Returns list of LBAs which have almost corrupted data. Data of the LBAs
- * will be corrupted very soon. Caller can overwrite, unmap or refresh data to
- * protect data
- */
- virtual nvme_command_ertr::future<std::list<uint64_t>> get_data_health() {
- std::list<uint64_t> fragile_lbas;
- return nvme_command_ertr::future<std::list<uint64_t>>(
- nvme_command_ertr::ready_future_marker{},
- fragile_lbas
- );
- }
-
- /*
- * Recovery Level
- *
- * Regulate magnitude of SSD-internal data recovery. Caller can get good read
- * latency with lower magnitude.
- */
- virtual nvme_command_ertr::future<> set_data_recovery_level(
- uint32_t level) { return nvme_command_ertr::now(); }
- /*
- * For passsing through nvme IO or Admin command to SSD
- * Caller can construct and execute its own nvme command
- */
- nvme_command_ertr::future<int> pass_admin(
- nvme_admin_command_t& admin_cmd);
- nvme_command_ertr::future<int> pass_through_io(
- nvme_io_command_t& io_cmd);
-
- bool support_multistream = false;
- uint8_t data_protection_type = 0;
-
- /*
- * Predictable Latency
- *
- * NVMe device can guarantee IO latency within pre-defined time window. This
- * functionality will be analyzed soon.
- */
-
-private:
- // identify_controller/namespace are used to get SSD internal information such
- // as supported features, NPWG and NPWA
- nvme_command_ertr::future<nvme_identify_controller_data_t> identify_controller();
- nvme_command_ertr::future<nvme_identify_namespace_data_t> identify_namespace();
- nvme_command_ertr::future<int> get_nsid();
- open_ertr::future<> open_for_io(
- const std::string& in_path,
- seastar::open_flags mode);
-
- seastar::file device;
- std::vector<seastar::file> io_device;
- uint32_t stream_index_to_open = WRITE_LIFE_NOT_SET;
- uint32_t stream_id_count = 1; // stream is disabled, defaultly.
- uint32_t awupf = 0;
-
- uint64_t write_granularity = 4096;
- uint64_t write_alignment = 4096;
- uint32_t atomic_write_unit = 4096;
-
- bool data_protection_enabled = false;
-};
-
class TestMemory : public RBMDevice {
public:
#include "include/buffer.h"
#include "crimson/os/seastore/random_block_manager/rbm_device.h"
+#include "crimson/os/seastore/random_block_manager/nvme_block_device.h"
#include "test/crimson/gtest_seastar.h"
#include "include/stringify.h"
using namespace crimson;
using namespace crimson::os;
using namespace crimson::os::seastore;
-using namespace nvme_device;
+using namespace random_block_device;
+using namespace random_block_device::nvme;
struct nvdev_test_t : seastar_test_suite_t {
std::unique_ptr<RBMDevice> device;
Cache cache;
std::vector<entry_validator_t> entries;
std::unique_ptr<CircularBoundedJournal> cbj;
- nvme_device::RBMDevice *device;
+ random_block_device::RBMDevice *device;
std::default_random_engine generator;
uint64_t block_size;
epm(new ExtentPlacementManager(true)),
cache(*epm)
{
- device = new nvme_device::TestMemory(CBTEST_DEFAULT_TEST_SIZE + CBTEST_DEFAULT_BLOCK_SIZE);
+ device = new random_block_device::TestMemory(CBTEST_DEFAULT_TEST_SIZE + CBTEST_DEFAULT_BLOCK_SIZE);
cbj.reset(new CircularBoundedJournal(device, std::string()));
device_id_t d_id = 1 << (std::numeric_limits<device_id_t>::digits - 1);
config.block_size = CBTEST_DEFAULT_BLOCK_SIZE;
struct rbm_test_t :
public seastar_test_suite_t, TMTestState {
std::unique_ptr<BlockRBManager> rbm_manager;
- std::unique_ptr<nvme_device::RBMDevice> device;
+ std::unique_ptr<random_block_device::RBMDevice> device;
struct rbm_transaction {
void add_rbm_allocated_blocks(alloc_delta_t &d) {
rbm_test_t() = default;
seastar::future<> set_up_fut() final {
- device.reset(new nvme_device::TestMemory(DEFAULT_TEST_SIZE));
+ device.reset(new random_block_device::TestMemory(DEFAULT_TEST_SIZE));
rbm_manager.reset(new BlockRBManager(device.get(), std::string()));
device_id_t d_id = 1 << (std::numeric_limits<device_id_t>::digits - 1);
config.start = paddr_t::make_blk_paddr(d_id, 0);
protected:
segment_manager::EphemeralSegmentManagerRef segment_manager;
std::list<segment_manager::EphemeralSegmentManagerRef> secondary_segment_managers;
- std::unique_ptr<nvme_device::RBMDevice> rb_device;
+ std::unique_ptr<random_block_device::RBMDevice> rb_device;
tm_make_config_t tm_config = tm_make_config_t::get_test_segmented_journal();
EphemeralTestState(std::size_t num_segment_managers) {
if (tm_config.j_type == journal_type_t::CIRCULARBOUNDED_JOURNAL) {
auto config =
journal::CircularBoundedJournal::mkfs_config_t::get_default();
- rb_device.reset(new nvme_device::TestMemory(config.total_size));
+ rb_device.reset(new random_block_device::TestMemory(config.total_size));
rb_device->set_device_id(
1 << (std::numeric_limits<device_id_t>::digits - 1));
}