From a585ac27e151e2ffe1e0a347d1a09fb4c752e809 Mon Sep 17 00:00:00 2001
From: Igor Fedotov
Date: Fri, 15 Sep 2023 20:35:13 +0300
Subject: [PATCH] os/bluestore: get rid of resulting lba alignment in
 allocators

Fixes: https://tracker.ceph.com/issues/62815

Signed-off-by: Igor Fedotov
(cherry picked from commit b0cb41a761dd7c32bde8e29eb12d44519a2988a5)
---
 src/os/bluestore/AvlAllocator.cc            |   6 +-
 src/os/bluestore/BtreeAllocator.cc          |   6 +-
 src/os/bluestore/StupidAllocator.cc         |  32 +---
 src/os/bluestore/StupidAllocator.h          |   4 -
 src/os/bluestore/fastbmap_allocator_impl.cc |  16 +-
 src/test/objectstore/Allocator_test.cc      |   3 +-
 .../objectstore/fastbmap_allocator_test.cc  | 181 +++++++++++++-----
 src/test/objectstore/store_test.cc          |   6 +-
 8 files changed, 151 insertions(+), 103 deletions(-)

diff --git a/src/os/bluestore/AvlAllocator.cc b/src/os/bluestore/AvlAllocator.cc
index 26eba36a0ec20..afa541862fb8f 100644
--- a/src/os/bluestore/AvlAllocator.cc
+++ b/src/os/bluestore/AvlAllocator.cc
@@ -39,7 +39,7 @@ uint64_t AvlAllocator::_pick_block_after(uint64_t *cursor,
   uint64_t search_bytes = 0;
   auto rs_start = range_tree.lower_bound(range_t{*cursor, size}, compare);
   for (auto rs = rs_start; rs != range_tree.end(); ++rs) {
-    uint64_t offset = p2roundup(rs->start, align);
+    uint64_t offset = rs->start;
     *cursor = offset + size;
     if (offset + size <= rs->end) {
       return offset;
@@ -59,7 +59,7 @@ uint64_t AvlAllocator::_pick_block_after(uint64_t *cursor,
   }
   // If we reached end, start from beginning till cursor.
   for (auto rs = range_tree.begin(); rs != rs_start; ++rs) {
-    uint64_t offset = p2roundup(rs->start, align);
+    uint64_t offset = rs->start;
     *cursor = offset + size;
     if (offset + size <= rs->end) {
       return offset;
@@ -82,7 +82,7 @@ uint64_t AvlAllocator::_pick_block_fits(uint64_t size,
   const auto compare = range_size_tree.key_comp();
   auto rs_start = range_size_tree.lower_bound(range_t{0, size}, compare);
   for (auto rs = rs_start; rs != range_size_tree.end(); ++rs) {
-    uint64_t offset = p2roundup(rs->start, align);
+    uint64_t offset = rs->start;
     if (offset + size <= rs->end) {
       return offset;
     }
diff --git a/src/os/bluestore/BtreeAllocator.cc b/src/os/bluestore/BtreeAllocator.cc
index cf08d7ae7d31f..2455ec111b170 100644
--- a/src/os/bluestore/BtreeAllocator.cc
+++ b/src/os/bluestore/BtreeAllocator.cc
@@ -25,7 +25,7 @@ uint64_t BtreeAllocator::_pick_block_after(uint64_t *cursor,
 {
   auto rs_start = range_tree.lower_bound(*cursor);
   for (auto rs = rs_start; rs != range_tree.end(); ++rs) {
-    uint64_t offset = p2roundup(rs->first, align);
+    uint64_t offset = rs->first;
     if (offset + size <= rs->second) {
       *cursor = offset + size;
       return offset;
@@ -37,7 +37,7 @@ uint64_t BtreeAllocator::_pick_block_after(uint64_t *cursor,
   }
   // If we reached end, start from beginning till cursor.
   for (auto rs = range_tree.begin(); rs != rs_start; ++rs) {
-    uint64_t offset = p2roundup(rs->first, align);
+    uint64_t offset = rs->first;
     if (offset + size <= rs->second) {
       *cursor = offset + size;
       return offset;
@@ -53,7 +53,7 @@ uint64_t BtreeAllocator::_pick_block_fits(uint64_t size,
   // the needs
   auto rs_start = range_size_tree.lower_bound(range_value_t{0,size});
   for (auto rs = rs_start; rs != range_size_tree.end(); ++rs) {
-    uint64_t offset = p2roundup(rs->start, align);
+    uint64_t offset = rs->start;
     if (offset + size <= rs->start + rs->size) {
       return offset;
     }
diff --git a/src/os/bluestore/StupidAllocator.cc b/src/os/bluestore/StupidAllocator.cc
index 550024e67e77d..8f74a499ed312 100644
--- a/src/os/bluestore/StupidAllocator.cc
+++ b/src/os/bluestore/StupidAllocator.cc
@@ -52,20 +52,6 @@ void StupidAllocator::_insert_free(uint64_t off, uint64_t len)
   }
 }
 
-/// return the effective length of the extent if we align to alloc_unit
-uint64_t StupidAllocator::_aligned_len(
-  StupidAllocator::interval_set_t::iterator p,
-  uint64_t alloc_unit)
-{
-  uint64_t skew = p.get_start() % alloc_unit;
-  if (skew)
-    skew = alloc_unit - skew;
-  if (skew > p.get_len())
-    return 0;
-  else
-    return p.get_len() - skew;
-}
-
 int64_t StupidAllocator::allocate_int(
   uint64_t want_size, uint64_t alloc_unit, int64_t hint,
   uint64_t *offset, uint32_t *length)
@@ -89,7 +75,7 @@ int64_t StupidAllocator::allocate_int(
   for (bin = orig_bin; bin < (int)free.size(); ++bin) {
     p = free[bin].lower_bound(hint);
     while (p != free[bin].end()) {
-      if (_aligned_len(p, alloc_unit) >= want_size) {
+      if (p.get_len() >= want_size) {
         goto found;
       }
       ++p;
@@ -102,7 +88,7 @@ int64_t StupidAllocator::allocate_int(
     p = free[bin].begin();
     auto end = hint ? free[bin].lower_bound(hint) : free[bin].end();
     while (p != end) {
-      if (_aligned_len(p, alloc_unit) >= want_size) {
+      if (p.get_len() >= want_size) {
         goto found;
       }
       ++p;
@@ -114,7 +100,7 @@ int64_t StupidAllocator::allocate_int(
   for (bin = orig_bin; bin >= 0; --bin) {
     p = free[bin].lower_bound(hint);
     while (p != free[bin].end()) {
-      if (_aligned_len(p, alloc_unit) >= alloc_unit) {
+      if (p.get_len() >= alloc_unit) {
         goto found;
       }
       ++p;
@@ -127,7 +113,7 @@ int64_t StupidAllocator::allocate_int(
     p = free[bin].begin();
     auto end = hint ? free[bin].lower_bound(hint) : free[bin].end();
     while (p != end) {
-      if (_aligned_len(p, alloc_unit) >= alloc_unit) {
+      if (p.get_len() >= alloc_unit) {
         goto found;
       }
       ++p;
@@ -137,11 +123,9 @@ int64_t StupidAllocator::allocate_int(
   return -ENOSPC;
 
  found:
-  uint64_t skew = p.get_start() % alloc_unit;
-  if (skew)
-    skew = alloc_unit - skew;
-  *offset = p.get_start() + skew;
-  *length = std::min(std::max(alloc_unit, want_size), p2align((p.get_len() - skew), alloc_unit));
+  *offset = p.get_start();
+  *length = std::min(std::max(alloc_unit, want_size), p2align(p.get_len(), alloc_unit));
+
   if (cct->_conf->bluestore_debug_small_allocations) {
     uint64_t max =
       alloc_unit * (rand() % cct->_conf->bluestore_debug_small_allocations);
@@ -158,7 +142,7 @@ int64_t StupidAllocator::allocate_int(
 
   free[bin].erase(*offset, *length);
   uint64_t off, len;
-  if (*offset && free[bin].contains(*offset - skew - 1, &off, &len)) {
+  if (*offset && free[bin].contains(*offset - 1, &off, &len)) {
     int newbin = _choose_bin(len);
     if (newbin != bin) {
       ldout(cct, 30) << __func__ << " demoting 0x" << std::hex << off << "~" << len
diff --git a/src/os/bluestore/StupidAllocator.h b/src/os/bluestore/StupidAllocator.h
index 0d50d73f42afd..443b091350e16 100644
--- a/src/os/bluestore/StupidAllocator.h
+++ b/src/os/bluestore/StupidAllocator.h
@@ -31,10 +31,6 @@ class StupidAllocator : public Allocator {
   unsigned _choose_bin(uint64_t len);
   void _insert_free(uint64_t offset, uint64_t len);
 
-  uint64_t _aligned_len(
-    interval_set_t::iterator p,
-    uint64_t alloc_unit);
-
 public:
   StupidAllocator(CephContext* cct,
                   int64_t size,
diff --git a/src/os/bluestore/fastbmap_allocator_impl.cc b/src/os/bluestore/fastbmap_allocator_impl.cc
index 595b124856f3c..4f735ba2efeeb 100644
--- a/src/os/bluestore/fastbmap_allocator_impl.cc
+++ b/src/os/bluestore/fastbmap_allocator_impl.cc
@@ -17,19 +17,9 @@ uint64_t AllocatorLevel::l2_allocs = 0;
 
 inline interval_t _align2units(uint64_t offset, uint64_t len, uint64_t min_length)
 {
-  interval_t res;
-  if (len >= min_length) {
-    res.offset = p2roundup(offset, min_length);
-    auto delta_off = res.offset - offset;
-    if (len > delta_off) {
-      res.length = len - delta_off;
-      res.length = p2align(res.length, min_length);
-      if (res.length) {
-        return res;
-      }
-    }
-  }
-  return interval_t();
+  return len >= min_length ?
+    interval_t(offset, p2align(len, min_length)) :
+    interval_t();
 }
 
 interval_t AllocatorLevel01Loose::_get_longest_from_l0(uint64_t pos0,
diff --git a/src/test/objectstore/Allocator_test.cc b/src/test/objectstore/Allocator_test.cc
index f1e3a04f4d259..b006500153c6e 100644
--- a/src/test/objectstore/Allocator_test.cc
+++ b/src/test/objectstore/Allocator_test.cc
@@ -516,8 +516,7 @@ TEST_P(AllocTest, test_alloc_47883)
   PExtentVector extents;
   auto need = 0x3f980000;
   auto got = alloc->allocate(need, 0x10000, 0, (int64_t)0, &extents);
-  EXPECT_GT(got, 0);
-  EXPECT_EQ(got, 0x630000);
+  EXPECT_GE(got, 0x630000);
 }
 
 TEST_P(AllocTest, test_alloc_50656_best_fit)
diff --git a/src/test/objectstore/fastbmap_allocator_test.cc b/src/test/objectstore/fastbmap_allocator_test.cc
index c59531985050d..710b3798f7a6d 100644
--- a/src/test/objectstore/fastbmap_allocator_test.cc
+++ b/src/test/objectstore/fastbmap_allocator_test.cc
@@ -625,6 +625,8 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment)
   ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
 
   {
+    // Original free space disposition (start chunk, count):
+    //
     size_t to_release = 2 * _1m + 0x1000;
     // release 2M + 4K at the beginning
     interval_vector_t r;
@@ -637,6 +639,8 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment)
     ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
   }
   {
+    // Original free space disposition (start chunk, count):
+    // <0, 513>,
     // allocate 4K within the deallocated range
     uint64_t allocated4 = 0;
     interval_vector_t a4;
@@ -652,79 +656,91 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment)
     ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
   }
   {
-    // allocate 1M - should go to the second 1M chunk
+    // Original free space disposition (start chunk, count):
+    // <1, 512>,
+    // allocate 1M - should go to offset 4096
     uint64_t allocated4 = 0;
     interval_vector_t a4;
     al2.allocate_l2(_1m, _1m, &allocated4, &a4);
     ASSERT_EQ(a4.size(), 1u);
     ASSERT_EQ(allocated4, _1m);
-    ASSERT_EQ(a4[0].offset, _1m);
+    ASSERT_EQ(a4[0].offset, 4096);
     ASSERT_EQ(a4[0].length, _1m);
     bins_overall.clear();
     al2.collect_stats(bins_overall);
-    ASSERT_EQ(bins_overall.size(), 3u);
-    ASSERT_EQ(bins_overall[0], 1u);
-    ASSERT_EQ(bins_overall[cbits((_1m - 0x1000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall.size(), 2u);
+    ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u);
     ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
   }
   {
+    // Original free space disposition (start chunk, count):
+    // <257, 256>,
     // and allocate yet another 8K within the deallocated range
     uint64_t allocated4 = 0;
     interval_vector_t a4;
     al2.allocate_l2(0x2000, 0x1000, &allocated4, &a4);
     ASSERT_EQ(a4.size(), 1u);
     ASSERT_EQ(allocated4, 0x2000u);
-    ASSERT_EQ(a4[0].offset, 0x1000u);
+    ASSERT_EQ(a4[0].offset, _1m + 0x1000u);
     ASSERT_EQ(a4[0].length, 0x2000u);
     bins_overall.clear();
     al2.collect_stats(bins_overall);
-    ASSERT_EQ(bins_overall[0], 1u);
-    ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall.size(), 2u);
+    ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u);
     ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
   }
   {
-    // release just allocated 1M
+    // Original free space disposition (start chunk, count):
+    // <259, 254>,
+    // release 4K~1M
     interval_vector_t r;
-    r.emplace_back(_1m, _1m);
+    r.emplace_back(0x1000, _1m);
     al2.free_l2(r);
     bins_overall.clear();
     al2.collect_stats(bins_overall);
-    ASSERT_EQ(bins_overall.size(), 2u);
-    ASSERT_EQ(bins_overall[cbits((2 * _1m - 0x3000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall.size(), 3u);
+    //ASSERT_EQ(bins_overall[cbits((2 * _1m - 0x3000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u);
     ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
   }
   {
-    // allocate 3M - should go to the second 1M chunk and @capacity/2
+    // Original free space disposition (start chunk, count):
+    // <1, 257>, <259, 254>,
+    // allocate 3M - should go to the first 1M chunk and @capacity/2
     uint64_t allocated4 = 0;
     interval_vector_t a4;
     al2.allocate_l2(3 * _1m, _1m, &allocated4, &a4);
     ASSERT_EQ(a4.size(), 2u);
     ASSERT_EQ(allocated4, 3 * _1m);
-    ASSERT_EQ(a4[0].offset, _1m);
+    ASSERT_EQ(a4[0].offset, 0x1000);
     ASSERT_EQ(a4[0].length, _1m);
     ASSERT_EQ(a4[1].offset, capacity / 2);
     ASSERT_EQ(a4[1].length, 2 * _1m);
     bins_overall.clear();
     al2.collect_stats(bins_overall);
-    ASSERT_EQ(bins_overall.size(), 3u);
-    ASSERT_EQ(bins_overall[0], 1u);
-    ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall.size(), 2u);
+    ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u);
     ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u);
   }
   {
-    // release allocated 1M in the second meg chunk except
+    // Original free space disposition (start chunk, count):
+    // <259, 254>,
+    // release allocated 1M in the first meg chunk except
     // the first 4K chunk
     interval_vector_t r;
-    r.emplace_back(_1m + 0x1000, _1m);
+    r.emplace_back(0x1000, _1m);
     al2.free_l2(r);
     bins_overall.clear();
     al2.collect_stats(bins_overall);
     ASSERT_EQ(bins_overall.size(), 3u);
     ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u);
-    ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u);
     ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u);
   }
   {
+    // Original free space disposition (start chunk, count):
+    // <1, 256>, <259, 254>,
     // release 2M @(capacity / 2)
     interval_vector_t r;
     r.emplace_back(capacity / 2, 2 * _1m);
@@ -733,10 +749,12 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment)
     al2.free_l2(r);
     bins_overall.clear();
     al2.collect_stats(bins_overall);
     ASSERT_EQ(bins_overall.size(), 3u);
     ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u);
-    ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits((_1m - 0x2000) / 0x1000) - 1], 1u);
     ASSERT_EQ(bins_overall[cbits((num_chunks) / 2) - 1], 1u);
   }
   {
+    // Original free space disposition (start chunk, count):
+    // <1, 256>, <259, 254>,
     // allocate 4x512K - should go to the second halves of
     // the first and second 1M chunks and @(capacity / 2)
     uint64_t allocated4 = 0;
@@ -744,51 +762,54 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment)
     interval_vector_t a4;
     al2.allocate_l2(2 * _1m, _1m / 2, &allocated4, &a4);
     ASSERT_EQ(a4.size(), 3u);
     ASSERT_EQ(allocated4, 2 * _1m);
-    ASSERT_EQ(a4[0].offset, _1m / 2);
+    ASSERT_EQ(a4[1].offset, 0x1000);
+    ASSERT_EQ(a4[1].length, _1m);
+    ASSERT_EQ(a4[0].offset, _1m + 0x3000);
     ASSERT_EQ(a4[0].length, _1m / 2);
-    ASSERT_EQ(a4[1].offset, _1m + _1m / 2);
-    ASSERT_EQ(a4[1].length, _1m / 2);
     ASSERT_EQ(a4[2].offset, capacity / 2);
-    ASSERT_EQ(a4[2].length, _1m);
+    ASSERT_EQ(a4[2].length, _1m / 2);
     bins_overall.clear();
     al2.collect_stats(bins_overall);
-    ASSERT_EQ(bins_overall.size(), 3u);
-    ASSERT_EQ(bins_overall[0], 1u);
-    // below we have 512K - 4K & 512K - 12K chunks which both fit into
-    // the same bin = 6
-    ASSERT_EQ(bins_overall[6], 2u);
+    ASSERT_EQ(bins_overall.size(), 2u);
+    ASSERT_EQ(bins_overall[cbits((_1m - 0x2000 - 0x80000) / 0x1000) - 1], 1u);
     ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u);
   }
   {
-    // cleanup first 2M except except the last 4K chunk
+    // Original free space disposition (start chunk, count):
+    // <387, 126>,
+    // cleanup first 1536K except the last 4K chunk
     interval_vector_t r;
-    r.emplace_back(0, 2 * _1m - 0x1000);
+    r.emplace_back(0, _1m + _1m / 2 - 0x1000);
     al2.free_l2(r);
     bins_overall.clear();
     al2.collect_stats(bins_overall);
     ASSERT_EQ(bins_overall.size(), 3u);
-    ASSERT_EQ(bins_overall[0], 1u);
-    ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits((_1m + _1m / 2 - 0x1000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits((_1m - 0x2000 - 0x80000) / 0x1000) - 1], 1u);
     ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u);
   }
   {
-    // release 2M @(capacity / 2)
+    // Original free space disposition (start chunk, count):
+    // <0, 383> <387, 126>,
+    // release 512K @(capacity / 2)
     interval_vector_t r;
-    r.emplace_back(capacity / 2, 2 * _1m);
+    r.emplace_back(capacity / 2, _1m / 2);
     al2.free_l2(r);
     bins_overall.clear();
     al2.collect_stats(bins_overall);
     ASSERT_EQ(bins_overall.size(), 3u);
-    ASSERT_EQ(bins_overall[0], 1u);
-    ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits((_1m + _1m / 2 - 0x1000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits((_1m - 0x2000 - 0x80000) / 0x1000) - 1], 1u);
     ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
   }
   {
-    // allocate 132M using 4M granularity should go to (capacity / 2)
+    // Original free space disposition (start chunk, count):
+    // <0, 383> <387, 126>,
+    // allocate 132M (=33792*4096) = using 4M granularity should go to (capacity / 2)
     uint64_t allocated4 = 0;
     interval_vector_t a4;
     al2.allocate_l2(132 * _1m, 4 * _1m , &allocated4, &a4);
@@ -799,24 +820,40 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment)
     bins_overall.clear();
     al2.collect_stats(bins_overall);
     ASSERT_EQ(bins_overall.size(), 3u);
+    ASSERT_EQ(bins_overall[cbits((_1m + _1m / 2 - 0x1000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits((_1m - 0x2000 - 0x80000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits(num_chunks / 2 - 33792) - 1], 1u);
   }
   {
-    // cleanup left 4K chunk in the first 2M
+    // Original free space disposition (start chunk, count):
+    // <0, 383> <387, 126>,
+    // cleanup remaining 4*4K chunks in the first 2M
     interval_vector_t r;
-    r.emplace_back(2 * _1m - 0x1000, 0x1000);
+    r.emplace_back(383 * 4096, 4 * 0x1000);
     al2.free_l2(r);
     bins_overall.clear();
     al2.collect_stats(bins_overall);
     ASSERT_EQ(bins_overall.size(), 2u);
+    ASSERT_EQ(bins_overall[cbits((2 * _1m + 0x1000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits(num_chunks / 2 - 33792) - 1], 1u);
   }
   {
+    // Original free space disposition (start chunk, count):
+    // <0, 513>,
     // release 132M @(capacity / 2)
     interval_vector_t r;
     r.emplace_back(capacity / 2, 132 * _1m);
     al2.free_l2(r);
+    bins_overall.clear();
+    al2.collect_stats(bins_overall);
+    ASSERT_EQ(bins_overall.size(), 2u);
+    ASSERT_EQ(bins_overall[cbits((2 * _1m + 0x1000) / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
   }
   {
+    // Original free space disposition (start chunk, count):
+    // <0, 513>,
     // allocate 132M using 2M granularity should go to the first chunk and to
     // (capacity / 2)
     uint64_t allocated4 = 0;
@@ -827,14 +864,31 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment)
     interval_vector_t a4;
     al2.allocate_l2(132 * _1m, 2 * _1m, &allocated4, &a4);
     ASSERT_EQ(a4.size(), 2u);
     ASSERT_EQ(a4[0].offset, 0u);
     ASSERT_EQ(a4[0].length, 2 * _1m);
     ASSERT_EQ(a4[1].offset, capacity / 2);
     ASSERT_EQ(a4[1].length, 130 * _1m);
+
+    bins_overall.clear();
+    al2.collect_stats(bins_overall);
+
+    ASSERT_EQ(bins_overall.size(), 2u);
+    ASSERT_EQ(bins_overall[cbits(0)], 1u);
+    ASSERT_EQ(bins_overall[cbits(num_chunks / 2 - 33792) - 1], 1u);
   }
   {
+    // Original free space disposition (start chunk, count):
+    // <512, 1>,
     // release 130M @(capacity / 2)
     interval_vector_t r;
     r.emplace_back(capacity / 2, 132 * _1m);
     al2.free_l2(r);
+    bins_overall.clear();
+    al2.collect_stats(bins_overall);
+
+    ASSERT_EQ(bins_overall.size(), 2u);
+    ASSERT_EQ(bins_overall[cbits(0)], 1u);
+    ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
   }
   {
+    // Original free space disposition (start chunk, count):
+    // <512,1>,
     // release 4K~16K
     // release 28K~32K
     // release 68K~24K
@@ -843,21 +897,46 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment)
     interval_vector_t r;
     r.emplace_back(0x1000, 0x4000);
     r.emplace_back(0x7000, 0x8000);
     r.emplace_back(0x11000, 0x6000);
     al2.free_l2(r);
+
+    bins_overall.clear();
+    al2.collect_stats(bins_overall);
+
+    ASSERT_EQ(bins_overall.size(), 4u);
+    ASSERT_EQ(bins_overall[cbits(0)], 1u);
+    ASSERT_EQ(bins_overall[cbits(0x4000 / 0x1000) - 1], 2u); // accounts both 0x4000 & 0x6000
+    ASSERT_EQ(bins_overall[cbits(0x8000 / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
   }
   {
-    // allocate 32K using 16K granularity - should bypass the first
-    // unaligned extent, use the second free extent partially given
-    // the 16K alignment and then fallback to capacity / 2
+    // Original free space disposition (start chunk, count):
+    // <1, 4>, <7, 8>, <17, 6> <512,1>,
+    // allocate 80K using 16K granularity
     uint64_t allocated4 = 0;
     interval_vector_t a4;
-    al2.allocate_l2(0x8000, 0x4000, &allocated4, &a4);
-    ASSERT_EQ(a4.size(), 2u);
-    ASSERT_EQ(a4[0].offset, 0x8000u);
-    ASSERT_EQ(a4[0].length, 0x4000u);
-    ASSERT_EQ(a4[1].offset, capacity / 2);
+    al2.allocate_l2(0x14000, 0x4000, &allocated4, &a4);
+
+    ASSERT_EQ(a4.size(), 4);
+    ASSERT_EQ(a4[1].offset, 0x1000u);
     ASSERT_EQ(a4[1].length, 0x4000u);
-  }
+    ASSERT_EQ(a4[0].offset, 0x7000u);
+    ASSERT_EQ(a4[0].length, 0x8000u);
+    ASSERT_EQ(a4[2].offset, 0x11000u);
+    ASSERT_EQ(a4[2].length, 0x4000u);
+    ASSERT_EQ(a4[3].offset, capacity / 2);
+    ASSERT_EQ(a4[3].length, 0x4000u);
+
+    bins_overall.clear();
+    al2.collect_stats(bins_overall);
+    ASSERT_EQ(bins_overall.size(), 3u);
+    ASSERT_EQ(bins_overall[cbits(0)], 1u);
+    ASSERT_EQ(bins_overall[cbits(0x2000 / 0x1000) - 1], 1u);
+    ASSERT_EQ(bins_overall[cbits(num_chunks / 2 - 1) - 1], 1u);
+  }
+  {
+    // Original free space disposition (start chunk, count):
+    // <21, 2> <512,1>,
+  }
 }
 std::cout << "Done L2 cont aligned" << std::endl;
 }
@@ -913,7 +992,7 @@ TEST(TestAllocatorLevel01, test_4G_alloc_bug2)
   al2.allocate_l2(0x3e000000, _1m, &allocated4, &a4);
   ASSERT_EQ(a4.size(), 2u);
   ASSERT_EQ(allocated4, 0x3e000000u);
-  ASSERT_EQ(a4[0].offset, 0x5fed00000u);
+  ASSERT_EQ(a4[0].offset, 0x5fec30000u);
   ASSERT_EQ(a4[0].length, 0x1300000u);
   ASSERT_EQ(a4[1].offset, 0x628000000u);
   ASSERT_EQ(a4[1].length, 0x3cd00000u);
diff --git a/src/test/objectstore/store_test.cc b/src/test/objectstore/store_test.cc
index 482d302830416..9edfebd6b9916 100644
--- a/src/test/objectstore/store_test.cc
+++ b/src/test/objectstore/store_test.cc
@@ -9524,9 +9524,9 @@ TEST_P(StoreTestSpecificAUSize, BluestoreRepairSharedBlobTest) {
   string key;
   _key_encode_u64(1, &key);
   bluestore_shared_blob_t sb(1);
-  sb.ref_map.get(0x2000, block_size);
-  sb.ref_map.get(0x4000, block_size);
-  sb.ref_map.get(0x4000, block_size);
+  sb.ref_map.get(0x822000, block_size);
+  sb.ref_map.get(0x824000, block_size);
+  sb.ref_map.get(0x824000, block_size);
   bufferlist bl;
   encode(sb, bl);
   bstore->inject_broken_shared_blob_key(key, bl);
-- 
2.39.5
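
The standalone sketch below illustrates what removing the p2roundup() call changes in StupidAllocator::allocate_int. It is a simplified model, not part of the patch: p2roundup()/p2align() are re-implemented here after Ceph's include/intarith.h helpers (arguments assumed to be powers of two), and the clamping of *length against want_size is omitted for brevity.

#include <cstdint>
#include <cstdio>

// Power-of-two round-up / round-down, modeled after include/intarith.h.
static inline uint64_t p2roundup(uint64_t x, uint64_t align) {
  return -(-x & -align);
}
static inline uint64_t p2align(uint64_t x, uint64_t align) {
  return x & -align;
}

struct extent_t { uint64_t start, len; };

// Pre-patch: round the returned offset up to alloc_unit, discarding the
// unaligned head ("skew") of the free extent.
static bool pick_old(const extent_t& e, uint64_t au, uint64_t* off, uint64_t* len) {
  *off = p2roundup(e.start, au);   // what the allocators did before
  uint64_t skew = *off - e.start;
  if (skew >= e.len)
    return false;                  // extent skipped although it holds free space
  *len = p2align(e.len - skew, au);
  return *len != 0;
}

// Post-patch: return the extent start as-is; only the length is still
// trimmed to whole allocation units.
static bool pick_new(const extent_t& e, uint64_t au, uint64_t* off, uint64_t* len) {
  *off = e.start;
  *len = p2align(e.len, au);
  return *len != 0;
}

int main() {
  extent_t e{0x1000, 0x9000};  // 36 KiB free extent starting 4 KiB into the device
  uint64_t au = 0x4000;        // 16 KiB allocation unit
  uint64_t off, len;
  if (pick_old(e, au, &off, &len))
    std::printf("old: 0x%llx~0x%llx\n",
                (unsigned long long)off, (unsigned long long)len);  // old: 0x4000~0x4000
  if (pick_new(e, au, &off, &len))
    std::printf("new: 0x%llx~0x%llx\n",
                (unsigned long long)off, (unsigned long long)len);  // new: 0x1000~0x8000
}

On this extent the old code could hand out only 16 KiB at the rounded-up offset, while the new code hands out 32 KiB starting where the free space actually begins. That is why the tests above now accept offsets such as 0x1000 or 0x5fec30000 instead of aligned LBAs, and why test_alloc_47883 checks EXPECT_GE rather than an exact allocation size.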