LOG_PREFIX(FixedKVInternalNode::resolve_relative_addrs);
for (auto i: *this) {
if (i->get_val().is_relative()) {
- auto updated = base.add_relative(i->get_val());
+ paddr_t updated;
+ if (base.get_addr_type() == addr_types_t::SEGMENT) {
+ updated = base.add_relative(i->get_val());
+ } else {
+ updated = base.add_offset(i->get_val().as_seg_paddr().get_segment_off());
+ }
SUBTRACE(seastore_fixedkv_tree, "{} -> {}", i->get_val(), updated);
i->set_val(updated);
}
} else {
auto sseq = NULL_SEG_SEQ;
auto stype = segment_type_t::NULL_SEG;
- if (cleaner != nullptr) {
+ if (cleaner != nullptr && i->get_paddr().get_addr_type() ==
+ addr_types_t::SEGMENT) {
auto sid = i->get_paddr().as_seg_paddr().get_segment_id();
auto &sinfo = cleaner->get_seg_info(sid);
sseq = sinfo.seq;
bool is_inline = false;
if (i->is_inline()) {
is_inline = true;
- i->set_paddr(final_block_start.add_relative(i->get_paddr()));
+ if (final_block_start.get_addr_type() == addr_types_t::SEGMENT) {
+ i->set_paddr(final_block_start.add_relative(i->get_paddr()));
+ } else if (final_block_start.get_addr_type() ==
+ addr_types_t::RANDOM_BLOCK) {
+ i->set_paddr(final_block_start.add_offset(
+ i->get_paddr().as_seg_paddr().get_segment_off()));
+ }
}
i->last_committed_crc = i->get_crc32c();
i->on_initial_write();
for (auto &alloc_blk : alloc_delta.alloc_blk_ranges) {
if (alloc_blk.paddr.is_relative()) {
assert(alloc_blk.paddr.is_record_relative());
- alloc_blk.paddr = record_base.add_relative(alloc_blk.paddr);
+ if (record_base.get_addr_type() == addr_types_t::SEGMENT) {
+ alloc_blk.paddr = record_base.add_relative(alloc_blk.paddr);
+ } else {
+ alloc_blk.paddr = record_base.add_offset(
+ alloc_blk.paddr.as_seg_paddr().get_segment_off());
+ }
}
DEBUG("replay alloc_blk {}~{} {}, journal_seq: {}",
alloc_blk.paddr, alloc_blk.len, alloc_blk.laddr, journal_seq);
).safe_then([this, config, FNAME]() mutable -> mkfs_ret {
rbm_abs_addr start_addr = convert_paddr_to_abs_addr(
config.start);
- assert(config.block_size == device->get_block_size());
+ assert(static_cast<seastore_off_t>(config.block_size) ==
+ device->get_block_size());
ceph::bufferlist bl;
CircularBoundedJournal::cbj_header_t head;
head.magic = CBJOURNAL_MAGIC;
}).safe_then([]() {
return mkfs_ertr::now();
});
- }).handle_error(
- mkfs_ertr::pass_further{},
- crimson::ct_error::assert_all{
- "Invalid error _open_device in CircularBoundedJournal::mkfs"
- }).finally([this] {
+ }).safe_then([this]() {
if (device) {
- return device->close();
- } else {
- return seastar::now();
+ return device->close(
+ ).safe_then([]() {
+ return mkfs_ertr::now();
+ });
}
+ return mkfs_ertr::now();
});
}
auto& delta = p.second;
return d_handler(locator,
delta,
+ locator.write_result.start_seq,
seastar::lowres_system_clock::time_point(
seastar::lowres_system_clock::duration(commit_time))
);
size_t get_block_size() const {
return header.block_size;
}
+ // Attach the backing device the journal reads/writes through. Non-owning
+ // raw pointer: the caller retains ownership and must keep `dev` alive for
+ // the lifetime of this object.
+ void add_device(NVMeBlockDevice* dev) {
+ device = dev;
+ }
private:
cbj_header_t header;
NVMeBlockDevice* device;
for (auto i: *this) {
if (i->get_val().paddr.is_relative()) {
auto val = i->get_val();
- val.paddr = base.add_relative(val.paddr);
+ if (base.get_addr_type() == addr_types_t::SEGMENT) {
+ val.paddr = base.add_relative(val.paddr);
+ } else {
+ val.paddr = base.add_offset(val.paddr.as_seg_paddr().get_segment_off());
+ }
TRACE("{} -> {}", i->get_val().paddr, val.paddr);
i->set_val(val);
}
"Invalid error read_rbm_header in NVMeManager::mkfs"
}
);
+ }).safe_then([this]() {
+ if (device) {
+ return device->close(
+ ).safe_then([]() {
+ return mkfs_ertr::now();
+ });
+ }
+ return mkfs_ertr::now();
}).handle_error(
mkfs_ertr::pass_further{},
crimson::ct_error::assert_all{
"Invalid error open_device in NVMeManager::mkfs"
- }).finally([this] {
- if (device) {
- return device->close();
- } else {
- return seastar::now();
- }
});
}
});
}
-seastar::future<> PosixNVMeDevice::close() {
+Device::close_ertr::future<> PosixNVMeDevice::close() {
logger().debug(" close ");
return device.close().then([this]() {
return seastar::do_for_each(io_device, [](auto target_device) {
return read_ertr::now();
}
-seastar::future<> TestMemory::close() {
+// close() now returns close_ertr instead of a plain future so callers can
+// chain it with safe_then() in the Device errorator style.
+Device::close_ertr::future<> TestMemory::close() {
logger().debug(" close ");
- return seastar::now();
+ // In-memory backend: nothing to tear down, report immediate success.
+ return close_ertr::now();
}
}
#include "crimson/osd/exceptions.h"
#include "crimson/common/layout.h"
+#include "crimson/os/seastore/seastore_types.h"
+#include "crimson/os/seastore/random_block_manager.h"
+#include "crimson/os/seastore/device.h"
namespace ceph {
namespace buffer {
* Various implementations with different interfaces such as POSIX APIs, Seastar,
* and SPDK, are available.
*/
-class NVMeBlockDevice {
+class NVMeBlockDevice : public Device {
+public:
+ // Keep the offset-based read() overloads visible alongside this override.
+ using Device::read;
+ // Device-interface read: translate the seastore paddr_t into an absolute
+ // device offset and forward to the uint64_t-offset read() implemented by
+ // the concrete backend. NOTE(review): `len` is unused here -- presumably
+ // out.length() already carries the size; confirm at call sites.
+ read_ertr::future<> read (
+ paddr_t addr,
+ size_t len,
+ ceph::bufferptr &out) final {
+ uint64_t rbm_addr = convert_paddr_to_abs_addr(addr);
+ return read(rbm_addr, out);
+ }
protected:
uint64_t size = 0;
uint32_t atomic_write_unit = 4096;
bool data_protection_enabled = false;
-
+ device_id_t device_id;
+ seastore_meta_t meta;
+ secondary_device_set_t devices;
public:
NVMeBlockDevice() {}
virtual ~NVMeBlockDevice() = default;
return std::make_unique<T>();
}
+ // Identifier of this device within the multi-device paddr scheme;
+ // assigned by the owner via set_device_id().
+ device_id_t get_device_id() const {
+ return device_id;
+ }
+ void set_device_id(device_id_t id) {
+ device_id = id;
+ }
+
+ // No on-disk magic for RBM devices yet; return a default-constructed value.
+ magic_t get_magic() const final {
+ return magic_t();
+ }
+
+ // Every NVMeBlockDevice presents itself as a RANDOM_BLOCK device.
+ device_type_t get_device_type() const final {
+ return device_type_t::RANDOM_BLOCK;
+ }
+
+ const seastore_meta_t &get_meta() const final {
+ return meta;
+ }
+
+ secondary_device_set_t& get_secondary_devices() final {
+ return devices;
+ }
+
/*
* Service NVMe device relative size
*
* by SSD even on power failure. The write equal to or smaller than
* atomic_write_unit does not require fsync().
*/
- uint64_t get_size() const { return size; }
- uint64_t get_block_size() const { return block_size; }
+
+ // Return types tightened to match the Device base interface
+ // (std::size_t / seastore_off_t) now that this class derives from Device.
+ std::size_t get_size() const { return size; }
+ seastore_off_t get_block_size() const { return block_size; }
uint64_t get_preffered_write_granularity() const { return write_granularity; }
uint64_t get_preffered_write_alignment() const { return write_alignment; }
virtual open_ertr::future<> open(
const std::string& path,
seastar::open_flags mode) = 0;
- virtual seastar::future<> close() = 0;
+ //virtual seastar::future<> close() = 0;
/*
* For passsing through nvme IO or Admin command to SSD
bufferptr &bptr,
uint16_t stream = 0) override;
+ using NVMeBlockDevice::read;
read_ertr::future<> read(
uint64_t offset,
- bufferptr &bptr) override;
+ bufferptr &bptr) final;
- seastar::future<> close() override;
+ close_ertr::future<> close() override;
discard_ertr::future<> discard(
uint64_t offset,
uint64_t len) override;
+ // No RBM-specific format/mount work is needed for this backend; both
+ // operations satisfy the Device interface with immediate success.
+ mkfs_ret mkfs(device_config_t) final {
+ return mkfs_ertr::now();
+ }
+
+ mount_ret mount() final {
+ return mount_ertr::now();
+ }
+
nvme_command_ertr::future<int> pass_admin(
nvme_admin_command_t& admin_cmd) override;
nvme_command_ertr::future<int> pass_through_io(
}
}
+ // No-op format/mount: this backend keeps no persistent RBM metadata, so
+ // the Device interface is satisfied with immediate success.
+ mkfs_ret mkfs(device_config_t) final {
+ return mkfs_ertr::now();
+ }
+
+ mount_ret mount() final {
+ return mount_ertr::now();
+ }
+
open_ertr::future<> open(
const std::string &in_path,
seastar::open_flags mode) override;
bufferptr &bptr,
uint16_t stream = 0) override;
+ using NVMeBlockDevice::read;
read_ertr::future<> read(
uint64_t offset,
bufferptr &bptr) override;
- seastar::future<> close() override;
+ close_ertr::future<> close() override;
char *buf;
size_t size;
*/
+// Resolve a possibly block-relative address against `base`. The
+// as_seg_paddr() cast is moved inside the relative branch so an
+// already-absolute address no longer forces the cast when `base` is not a
+// segment address (e.g. RANDOM_BLOCK).
paddr_t maybe_relative_to(paddr_t base) const {
assert(!base.is_block_relative());
- seg_paddr_t& s = base.as_seg_paddr();
- if (is_block_relative())
+ if (is_block_relative()) {
+ seg_paddr_t& s = base.as_seg_paddr();
return s.add_block_relative(*this);
- else
+ } else
return *this;
}
};
+// Rebase a record-relative root address onto `base`. A SEGMENT base keeps
+// the original add_record_relative path; otherwise the relative offset is
+// pulled from the seg_paddr_t payload and applied with add_offset.
+// NOTE(review): assumes relative offsets are stored in seg_paddr_t form
+// even when the base is RANDOM_BLOCK -- confirm.
void adjust_addrs_from_base(paddr_t base) {
paddr_t _root_addr = root_addr;
if (_root_addr.is_relative()) {
- root_addr = base.add_record_relative(_root_addr);
+ if (base.get_addr_type() == addr_types_t::SEGMENT) {
+ root_addr = base.add_record_relative(_root_addr);
+ } else {
+ // RANDOM_BLOCK
+ root_addr = base.add_offset(_root_addr.as_seg_paddr().get_segment_off());
+ }
}
}
};
inline paddr_t paddr_t::maybe_relative_to(paddr_t o) const {
PADDR_OPERATION(addr_types_t::SEGMENT, seg_paddr_t, maybe_relative_to(o))
+ // RANDOM_BLOCK addresses are treated as always absolute, so they pass
+ // through unchanged instead of hitting the unsupported-type assert.
+ if (get_addr_type() == addr_types_t::RANDOM_BLOCK) {
+ return *this;
+ }
ceph_assert(0 == "not supported type");
return P_ADDR_NULL;
}
journal_seq_t alloc_replay_from)
{
LOG_PREFIX(SegmentCleaner::update_journal_tail_target);
+ if (dirty_replay_from.offset.get_addr_type() == addr_types_t::RANDOM_BLOCK) {
+ return;
+ }
if (dirty_extents_replay_from == JOURNAL_SEQ_NULL
|| dirty_replay_from > dirty_extents_replay_from) {
DEBUG("dirty_extents_replay_from={} => {}",
time_point last_modified = time_point(),
time_point last_rewritten = time_point(),
bool init_scan = false) {
+ if (addr.get_addr_type() != addr_types_t::SEGMENT)
+ return;
auto& seg_addr = addr.as_seg_paddr();
if (!init_scan && !init_complete)
const bool force = false) {
if (!init_complete && !force)
return;
+ if (addr.get_addr_type() != addr_types_t::SEGMENT)
+ return;
ceph_assert(stats.used_bytes >= len);
stats.used_bytes -= len;
TransactionManager::~TransactionManager() {}
-TransactionManagerRef make_transaction_manager(bool detailed)
+TransactionManagerRef make_transaction_manager(bool detailed, bool cbjournal)
{
auto epm = std::make_unique<ExtentPlacementManager>();
auto cache = std::make_unique<Cache>(*epm);
*backref_manager,
*cache,
detailed);
- auto journal = journal::make_segmented(*segment_cleaner);
+ JournalRef journal;
+ if (!cbjournal) {
+ journal = journal::make_segmented(*segment_cleaner);
+ } else {
+ journal = journal::make_circularbounded(
+ nullptr, "");
+ }
epm->init_ool_writers(
*segment_cleaner,
segment_cleaner->get_ool_segment_seq_allocator());
alloc_extent_ret<T> alloc_extent(
Transaction &t,
laddr_t laddr_hint,
- extent_len_t len) {
+ extent_len_t len,
+ placement_hint_t hint = placement_hint_t::HOT) {
placement_hint_t placement_hint;
if constexpr (T::TYPE == extent_types_t::OBJECT_DATA_BLOCK ||
T::TYPE == extent_types_t::COLL_BLOCK) {
placement_hint = placement_hint_t::COLD;
} else {
- placement_hint = placement_hint_t::HOT;
+ placement_hint = hint;
}
LOG_PREFIX(TransactionManager::alloc_extent);
SUBTRACET(seastore_tm, "{} len={}, placement_hint={}, laddr_hint={}",
dev->get_device_id(), is_primary);
epm->add_device(dev, is_primary);
- ceph_assert(dev->get_device_type() == device_type_t::SEGMENTED);
- auto sm = dynamic_cast<SegmentManager*>(dev);
- ceph_assert(sm != nullptr);
- sm_group.add_segment_manager(sm);
+ if (dev->get_device_type() == device_type_t::SEGMENTED) {
+ auto sm = dynamic_cast<SegmentManager*>(dev);
+ ceph_assert(sm != nullptr);
+ sm_group.add_segment_manager(sm);
+ }
}
~TransactionManager();
auto get_cache() {
return cache.get();
}
+ // Non-owning accessor; lets tests and setup code reach the journal
+ // directly (e.g. to call CircularBoundedJournal::mkfs()/add_device()).
+ auto get_journal() {
+ return journal.get();
+ }
};
using TransactionManagerRef = std::unique_ptr<TransactionManager>;
-TransactionManagerRef make_transaction_manager(bool detailed);
+TransactionManagerRef make_transaction_manager(bool detailed, bool cbjournal = false);
}
}
int ret = memcmp(original_data.data, read_data.data, BUF_SIZE);
- device->close().wait();
+ device->close().unsafe_get();
ASSERT_TRUE(ret == 0);
device.reset(nullptr);
});
auto replay() {
cbj->replay(
- [this](const auto &offsets, const auto &e, auto last_modified)
+ [this](const auto &offsets, const auto &e, auto j_seq, auto last_modified)
-> Journal::replay_ret {
bool found = false;
for (auto &i : entries) {
struct transaction_manager_test_t :
public seastar_test_suite_t,
- TMTestState {
+ TMTestState,
+ ::testing::WithParamInterface<const char*> {
std::random_device rd;
std::mt19937 gen;
return static_cast<char>(std::uniform_int_distribution<>(0, 255)(gen));
}
+ // True when this parameterized-test instance runs against the segmented
+ // journal; used to choose placement hints and setup paths per backend.
+ bool for_segmented() {
+ std::string j_type = GetParam();
+ if (j_type == "segmented") {
+ return true;
+ }
+ return false;
+ }
seastar::future<> set_up_fut() final {
- return tm_setup();
+ // Pick the journal backend from the gtest parameter; any other value is
+ // a test-harness bug, so fail hard.
+ std::string j_type = GetParam();
+ if (j_type == "segmented") {
+ return tm_setup();
+ } else if (j_type == "circularbounded") {
+ return tm_setup(journal_type::CIRCULARBOUNDED_JOURNAL);
+ } else {
+ ceph_assert(0 == "no support");
+ }
}
seastar::future<> tear_down_fut() final {
test_transaction_t &t,
laddr_t hint,
extent_len_t len,
- char contents) {
+ char contents,
+ placement_hint_t p_hint = placement_hint_t::HOT) {
auto extent = with_trans_intr(*(t.t), [&](auto& trans) {
- return tm->alloc_extent<TestBlock>(trans, hint, len);
+ return tm->alloc_extent<TestBlock>(trans, hint, len, p_hint);
}).unsafe_get0();
extent->set_contents(contents);
EXPECT_FALSE(test_mappings.contains(extent->get_laddr(), t.mapping_delta));
TestBlockRef alloc_extent(
test_transaction_t &t,
laddr_t hint,
- extent_len_t len) {
+ extent_len_t len,
+ placement_hint_t p_hint = placement_hint_t::HOT) {
return alloc_extent(
t,
hint,
len,
- get_random_contents());
+ get_random_contents(),
+ p_hint);
}
bool check_usage() {
return backref_manager->scan_mapped_space(
t,
[&tracker](auto offset, auto len, depth_t) {
- logger().debug("check_usage: tracker alloc {}~{}",
- offset, len);
- tracker->allocate(
- offset.as_seg_paddr().get_segment_id(),
- offset.as_seg_paddr().get_segment_off(),
- len);
+ if (offset.get_addr_type() == addr_types_t::SEGMENT) {
+ logger().debug("check_usage: tracker alloc {}~{}",
+ offset, len);
+ tracker->allocate(
+ offset.as_seg_paddr().get_segment_id(),
+ offset.as_seg_paddr().get_segment_off(),
+ len);
+ }
}).si_then([&tracker, this] {
auto &backrefs = cache->get_backrefs();
for (auto &backref : backrefs) {
- logger().debug("check_usage: by backref, tracker alloc {}~{}",
- backref.paddr, backref.len);
- tracker->allocate(
- backref.paddr.as_seg_paddr().get_segment_id(),
- backref.paddr.as_seg_paddr().get_segment_off(),
- backref.len);
+ if (backref.paddr.get_addr_type() == addr_types_t::SEGMENT) {
+ logger().debug("check_usage: by backref, tracker alloc {}~{}",
+ backref.paddr, backref.len);
+ tracker->allocate(
+ backref.paddr.as_seg_paddr().get_segment_id(),
+ backref.paddr.as_seg_paddr().get_segment_off(),
+ backref.len);
+ }
}
auto &del_backrefs = cache->get_del_backrefs();
for (auto &del_backref : del_backrefs) {
- logger().debug("check_usage: by backref, tracker release {}~{}",
- del_backref.paddr, del_backref.len);
- tracker->release(
- del_backref.paddr.as_seg_paddr().get_segment_id(),
- del_backref.paddr.as_seg_paddr().get_segment_off(),
- del_backref.len);
+ if (del_backref.paddr.get_addr_type() == addr_types_t::SEGMENT) {
+ logger().debug("check_usage: by backref, tracker release {}~{}",
+ del_backref.paddr, del_backref.len);
+ tracker->release(
+ del_backref.paddr.as_seg_paddr().get_segment_id(),
+ del_backref.paddr.as_seg_paddr().get_segment_off(),
+ del_backref.len);
+ }
}
return seastar::now();
});
boost::make_counting_iterator(num),
[&, this](auto) {
return tm->alloc_extent<TestBlock>(
- *(t.t), L_ADDR_MIN, size
+ *(t.t), L_ADDR_MIN, size,
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE
).si_then([&, this](auto extent) {
extent->set_contents(get_random_contents());
EXPECT_FALSE(
auto extent = alloc_extent(
t,
i * BSIZE,
- BSIZE);
+ BSIZE,
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
ASSERT_EQ(i * BSIZE, extent->get_laddr());
if (try_submit_transaction(std::move(t)))
break;
tm_multi_device_test_t() : transaction_manager_test_t(3) {}
};
-TEST_F(tm_single_device_test_t, basic)
+TEST_P(tm_single_device_test_t, basic)
{
constexpr laddr_t SIZE = 4096;
run_async([this] {
t,
ADDR,
SIZE,
- 'a');
+ 'a',
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
ASSERT_EQ(ADDR, extent->get_laddr());
check_mappings(t);
check();
});
}
-TEST_F(tm_single_device_test_t, mutate)
+TEST_P(tm_single_device_test_t, mutate)
{
constexpr laddr_t SIZE = 4096;
run_async([this] {
t,
ADDR,
SIZE,
- 'a');
+ 'a',
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
ASSERT_EQ(ADDR, extent->get_laddr());
check_mappings(t);
check();
});
}
-TEST_F(tm_single_device_test_t, allocate_lba_conflict)
+TEST_P(tm_single_device_test_t, allocate_lba_conflict)
{
constexpr laddr_t SIZE = 4096;
run_async([this] {
t,
ADDR,
SIZE,
- 'a');
+ 'a',
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
ASSERT_EQ(ADDR, extent->get_laddr());
check_mappings(t);
check();
t2,
ADDR2,
SIZE,
- 'a');
+ 'a',
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
ASSERT_EQ(ADDR2, extent2->get_laddr());
check_mappings(t2);
extent2.reset();
});
}
-TEST_F(tm_single_device_test_t, mutate_lba_conflict)
+TEST_P(tm_single_device_test_t, mutate_lba_conflict)
{
constexpr laddr_t SIZE = 4096;
run_async([this] {
auto extent = alloc_extent(
t,
laddr_t(i * SIZE),
- SIZE);
+ SIZE,
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
}
check_mappings(t);
submit_transaction(std::move(t));
});
}
-TEST_F(tm_single_device_test_t, concurrent_mutate_lba_no_conflict)
+TEST_P(tm_single_device_test_t, concurrent_mutate_lba_no_conflict)
{
constexpr laddr_t SIZE = 4096;
constexpr size_t NUM = 500;
auto extent = alloc_extent(
t,
laddr_t(i * SIZE),
- SIZE);
+ SIZE,
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
}
submit_transaction(std::move(t));
}
});
}
-TEST_F(tm_single_device_test_t, create_remove_same_transaction)
+TEST_P(tm_single_device_test_t, create_remove_same_transaction)
{
constexpr laddr_t SIZE = 4096;
run_async([this] {
t,
ADDR,
SIZE,
- 'a');
+ 'a',
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
ASSERT_EQ(ADDR, extent->get_laddr());
check_mappings(t);
dec_ref(t, ADDR);
t,
ADDR,
SIZE,
- 'a');
+ 'a',
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
submit_transaction(std::move(t));
check();
});
}
-TEST_F(tm_single_device_test_t, split_merge_read_same_transaction)
+TEST_P(tm_single_device_test_t, split_merge_read_same_transaction)
{
constexpr laddr_t SIZE = 4096;
run_async([this] {
auto extent = alloc_extent(
t,
laddr_t(i * SIZE),
- SIZE);
+ SIZE,
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
}
check_mappings(t);
submit_transaction(std::move(t));
});
}
-TEST_F(tm_single_device_test_t, inc_dec_ref)
+TEST_P(tm_single_device_test_t, inc_dec_ref)
{
constexpr laddr_t SIZE = 4096;
run_async([this] {
t,
ADDR,
SIZE,
- 'a');
+ 'a',
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
ASSERT_EQ(ADDR, extent->get_laddr());
check_mappings(t);
check();
});
}
-TEST_F(tm_single_device_test_t, cause_lba_split)
+TEST_P(tm_single_device_test_t, cause_lba_split)
{
constexpr laddr_t SIZE = 4096;
run_async([this] {
t,
i * SIZE,
SIZE,
- (char)(i & 0xFF));
+ (char)(i & 0xFF),
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
ASSERT_EQ(i * SIZE, extent->get_laddr());
submit_transaction(std::move(t));
}
});
}
-TEST_F(tm_single_device_test_t, random_writes)
+TEST_P(tm_single_device_test_t, random_writes)
{
constexpr size_t TOTAL = 4<<20;
constexpr size_t BSIZE = 4<<10;
auto extent = alloc_extent(
t,
i * BSIZE,
- BSIZE);
+ BSIZE,
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
ASSERT_EQ(i * BSIZE, extent->get_laddr());
submit_transaction(std::move(t));
}
auto padding = alloc_extent(
t,
TOTAL + (k * PADDING_SIZE),
- PADDING_SIZE);
+ PADDING_SIZE,
+ for_segmented() ? placement_hint_t::HOT : placement_hint_t::REWRITE);
dec_ref(t, padding->get_laddr());
}
submit_transaction(std::move(t));
});
}
-TEST_F(tm_single_device_test_t, find_hole_assert_trigger)
+TEST_P(tm_single_device_test_t, find_hole_assert_trigger)
{
constexpr unsigned max = 10;
constexpr size_t BSIZE = 4<<10;
});
}
-TEST_F(tm_single_device_test_t, random_writes_concurrent)
+TEST_P(tm_single_device_test_t, random_writes_concurrent)
{
test_random_writes_concurrent();
}
-TEST_F(tm_multi_device_test_t, random_writes_concurrent)
+TEST_P(tm_multi_device_test_t, random_writes_concurrent)
{
test_random_writes_concurrent();
}
+
+// Run every TEST_P in this suite once per journal backend parameter.
+INSTANTIATE_TEST_SUITE_P(
+ transaction_manager_test,
+ tm_single_device_test_t,
+ ::testing::Values (
+ "segmented",
+ "circularbounded"
+ )
+);
+
+// NOTE(review): the multi-device suite is also instantiated with
+// "circularbounded" -- confirm the CBJ path supports multiple devices.
+INSTANTIATE_TEST_SUITE_P(
+ transaction_manager_test,
+ tm_multi_device_test_t,
+ ::testing::Values (
+ "segmented",
+ "circularbounded"
+ )
+);
#include "crimson/os/seastore/segment_manager.h"
#include "crimson/os/seastore/collection_manager/flat_collection_manager.h"
#include "crimson/os/seastore/onode_manager/staged-fltree/fltree_onode_manager.h"
+#include "crimson/os/seastore/random_block_manager/nvmedevice.h"
+#include "crimson/os/seastore/journal/circular_bounded_journal.h"
using namespace crimson;
using namespace crimson::os;
protected:
segment_manager::EphemeralSegmentManagerRef segment_manager;
std::list<segment_manager::EphemeralSegmentManagerRef> secondary_segment_managers;
+ std::unique_ptr<nvme_device::NVMeBlockDevice> rb_device;
+ journal_type j_type = journal_type::SEGMENT_JOURNAL;
EphemeralTestState(std::size_t num_segment_managers) {
assert(num_segment_managers > 0);
_mount().handle_error(crimson::ct_error::assert_all{}).get0();
}
- seastar::future<> tm_setup() {
+ seastar::future<> tm_setup(journal_type j_type = journal_type::SEGMENT_JOURNAL) {
+ this->j_type = j_type;
segment_manager = segment_manager::create_test_ephemeral();
for (auto &sec_sm : secondary_segment_managers) {
sec_sm = segment_manager::create_test_ephemeral();
}
+ if (j_type == journal_type::CIRCULARBOUNDED_JOURNAL) {
+ auto config =
+ journal::CircularBoundedJournal::mkfs_config_t::get_default();
+ rb_device.reset(new nvme_device::TestMemory(config.total_size));
+ rb_device->set_device_id(
+ 1 << (std::numeric_limits<device_id_t>::digits - 1));
+ }
return segment_manager->init(
).safe_then([this] {
return crimson::do_for_each(
for (auto &sec_sm : secondary_segment_managers) {
sec_sm.reset();
}
+ if (j_type == journal_type::CIRCULARBOUNDED_JOURNAL) {
+ rb_device.reset();
+ }
});
}
};
TMTestState(std::size_t num_devices) : EphemeralTestState(num_devices) {}
virtual void _init() override {
- tm = make_transaction_manager(true);
+ tm = make_transaction_manager(true,
+ j_type == journal_type::SEGMENT_JOURNAL ? false : true);
tm->add_device(segment_manager.get(), true);
+ if (j_type == journal_type::CIRCULARBOUNDED_JOURNAL) {
+ tm->add_device(rb_device.get(), false);
+ static_cast<journal::CircularBoundedJournal*>(tm->get_journal())->
+ add_device(rb_device.get());
+ }
if (get_num_devices() > 1) {
for (auto &sec_sm : secondary_segment_managers) {
tm->add_device(sec_sm.get(), false);
}
virtual FuturizedStore::mkfs_ertr::future<> _mkfs() {
- return tm->mkfs(
- ).handle_error(
- crimson::ct_error::assert_all{"Error in teardown"}
- );
+ // Segmented journal: TransactionManager::mkfs covers everything.
+ if (j_type == journal_type::SEGMENT_JOURNAL) {
+ return tm->mkfs(
+ ).handle_error(
+ crimson::ct_error::assert_all{"Error in mkfs"}
+ );
+ } else {
+ // Circular-bounded journal: format the CBJ with its default config
+ // first, then run TransactionManager::mkfs on top of it.
+ auto config = journal::CircularBoundedJournal::mkfs_config_t::get_default();
+ return static_cast<journal::CircularBoundedJournal*>(tm->get_journal())->mkfs(
+ config
+ ).safe_then([this]() {
+ return tm->mkfs(
+ ).handle_error(
+ crimson::ct_error::assert_all{"Error in mkfs"}
+ );
+ }).handle_error(
+ crimson::ct_error::assert_all{"Error in mkfs"}
+ );
+ }
}
auto create_mutate_transaction() {