From: Igor Fedotov Date: Tue, 12 Mar 2019 11:04:17 +0000 (+0300) Subject: os/bluestore: os/bluestore: implement dump for bitmap allocator X-Git-Tag: v14.2.1~104^2~4 X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=a6450ece9bf235ca622ef2ab6be09d01c5140fa2;p=ceph.git os/bluestore: os/bluestore: implement dump for bitmap allocator Signed-off-by: Igor Fedotov (cherry picked from commit 80310d98a7456afdb2002eaf2891853ea21eaf68) --- diff --git a/src/os/bluestore/BitmapAllocator.cc b/src/os/bluestore/BitmapAllocator.cc index ef7c0f003b6be..f501783a67449 100755 --- a/src/os/bluestore/BitmapAllocator.cc +++ b/src/os/bluestore/BitmapAllocator.cc @@ -83,3 +83,19 @@ void BitmapAllocator::shutdown() ldout(cct, 1) << __func__ << dendl; _shutdown(); } + +void BitmapAllocator::dump() +{ + // bin -> interval count + std::map<size_t, size_t> bins_overall; + collect_stats(bins_overall); + auto it = bins_overall.begin(); + while (it != bins_overall.end()) { + ldout(cct, 0) << __func__ + << " bin " << it->first + << "(< " << byte_u_t((1 << (it->first + 1)) * get_min_alloc_size()) << ")" + << " : " << it->second << " extents" + << dendl; + ++it; + } +} diff --git a/src/os/bluestore/BitmapAllocator.h b/src/os/bluestore/BitmapAllocator.h index d47d307c8e425..223c21dfbc5e8 100755 --- a/src/os/bluestore/BitmapAllocator.h +++ b/src/os/bluestore/BitmapAllocator.h @@ -35,9 +35,7 @@ public: return get_available(); } - void dump() override - { - } + void dump() override; double get_fragmentation(uint64_t) override { return _get_fragmentation(); diff --git a/src/os/bluestore/fastbmap_allocator_impl.cc b/src/os/bluestore/fastbmap_allocator_impl.cc index 5919bce45a10b..f53d31a6fc7f2 100755 --- a/src/os/bluestore/fastbmap_allocator_impl.cc +++ b/src/os/bluestore/fastbmap_allocator_impl.cc @@ -32,7 +32,6 @@ inline interval_t _align2units(uint64_t offset, uint64_t len, uint64_t min_lengt return interval_t(); } - interval_t AllocatorLevel01Loose::_get_longest_from_l0(uint64_t pos0, uint64_t pos1, 
uint64_t min_length, interval_t* tail) const { @@ -508,3 +507,38 @@ bool AllocatorLevel01Loose::_allocate_l1(uint64_t length, } return _is_empty_l1(l1_pos_start, l1_pos_end); } + +void AllocatorLevel01Loose::collect_stats( + std::map<size_t, size_t>& bins_overall) +{ + size_t free_seq_cnt = 0; + for (auto slot : l0) { + if (slot == all_slot_set) { + free_seq_cnt += CHILD_PER_SLOT_L0; + } else if(slot != all_slot_clear) { + size_t pos = 0; + do { + auto pos1 = find_next_set_bit(slot, pos); + if (pos1 == pos) { + free_seq_cnt++; + pos = pos1 + 1; + } else { + if (free_seq_cnt) { + bins_overall[cbits(free_seq_cnt) - 1]++; + free_seq_cnt = 0; + } + if (pos1 < bits_per_slot) { + free_seq_cnt = 1; + } + pos = pos1 + 1; + } + } while (pos < bits_per_slot); + } else if (free_seq_cnt) { + bins_overall[cbits(free_seq_cnt) - 1]++; + free_seq_cnt = 0; + } + } + if (free_seq_cnt) { + bins_overall[cbits(free_seq_cnt) - 1]++; + } +} diff --git a/src/os/bluestore/fastbmap_allocator_impl.h b/src/os/bluestore/fastbmap_allocator_impl.h index 6b138205a3dff..d043715ca0a35 100755 --- a/src/os/bluestore/fastbmap_allocator_impl.h +++ b/src/os/bluestore/fastbmap_allocator_impl.h @@ -87,6 +87,9 @@ public: virtual ~AllocatorLevel() {} + virtual void collect_stats( + std::map<size_t, size_t>& bins_overall) = 0; + }; class AllocatorLevel01 : public AllocatorLevel @@ -466,6 +469,8 @@ public: } return res * l0_granularity; } + void collect_stats( + std::map<size_t, size_t>& bins_overall) override; }; class AllocatorLevel01Compact : public AllocatorLevel01 @@ -475,6 +480,11 @@ class AllocatorLevel01Compact : public AllocatorLevel01 { return 8; } public: + void collect_stats( + std::map<size_t, size_t>& bins_overall) override + { + // not implemented + } + }; template <class L1> @@ -503,6 +513,12 @@ public: { return l1.get_min_alloc_size(); } + void collect_stats( + std::map<size_t, size_t>& bins_overall) override { + + std::lock_guard l(lock); + l1.collect_stats(bins_overall); + } protected: ceph::mutex lock = ceph::make_mutex("AllocatorLevel02::lock"); diff --git 
a/src/test/objectstore/fastbmap_allocator_test.cc b/src/test/objectstore/fastbmap_allocator_test.cc index 537ebc36d6dd4..5c5503a806280 100755 --- a/src/test/objectstore/fastbmap_allocator_test.cc +++ b/src/test/objectstore/fastbmap_allocator_test.cc @@ -483,6 +483,7 @@ TEST(TestAllocatorLevel01, test_l2_unaligned) uint64_t capacity = num_l2_entries * 256 * 512 * 4096; // 3x512 MB al2.init(capacity, 0x1000); std::cout << "Init L2 Unaligned" << std::endl; + for (uint64_t i = 0; i < capacity; i += _1m / 2) { uint64_t allocated4 = 0; interval_vector_t a4; @@ -589,8 +590,16 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) TestAllocatorLevel02 al2; uint64_t num_l2_entries = 3; uint64_t capacity = num_l2_entries * 256 * 512 * 4096; // 3x512 MB - al2.init(capacity, 0x1000); + uint64_t num_chunks = capacity / 4096; + al2.init(capacity, 4096); std::cout << "Init L2 cont aligned" << std::endl; + + std::map<size_t, size_t> bins_overall; + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall.size(), 1u); +// std::cout<<bins_overall.begin()->first << std::endl; + ASSERT_EQ(bins_overall[cbits(num_chunks) - 1], 1u); + for (uint64_t i = 0; i < capacity / 2; i += _1m) { uint64_t allocated4 = 0; interval_vector_t a4; @@ -602,11 +611,22 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) } ASSERT_EQ(capacity / 2, al2.debug_get_free()); + bins_overall.clear(); + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall.size(), 1u); + ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); + { + size_t to_release = 2 * _1m + 0x1000; // release 2M + 4K at the beginning interval_vector_t r; - r.emplace_back(0, 2 * _1m + 0x1000); + r.emplace_back(0, to_release); al2.free_l2(r); + bins_overall.clear(); + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall.size(), 2u); + ASSERT_EQ(bins_overall[cbits(to_release / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { // allocate 4K within the deallocated range @@ -617,6 +637,11 @@ TEST(TestAllocatorLevel01, 
test_l2_contiguous_alignment) ASSERT_EQ(allocated4, 0x1000u); ASSERT_EQ(a4[0].offset, 0u); ASSERT_EQ(a4[0].length, 0x1000u); + bins_overall.clear(); + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall.size(), 2u); + ASSERT_EQ(bins_overall[cbits(2 * _1m / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { // allocate 1M - should go to the second 1M chunk @@ -627,6 +652,12 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) ASSERT_EQ(allocated4, _1m); ASSERT_EQ(a4[0].offset, _1m); ASSERT_EQ(a4[0].length, _1m); + bins_overall.clear(); + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall.size(), 3u); + ASSERT_EQ(bins_overall[0], 1u); + ASSERT_EQ(bins_overall[cbits((_1m - 0x1000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { // and allocate yet another 8K within the deallocated range @@ -637,12 +668,22 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) ASSERT_EQ(allocated4, 0x2000u); ASSERT_EQ(a4[0].offset, 0x1000u); ASSERT_EQ(a4[0].length, 0x2000u); + bins_overall.clear(); + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall[0], 1u); + ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { // release just allocated 1M interval_vector_t r; r.emplace_back(_1m, _1m); al2.free_l2(r); + bins_overall.clear(); + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall.size(), 2u); + ASSERT_EQ(bins_overall[cbits((2 * _1m - 0x3000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { // allocate 3M - should go to the second 1M chunk and @capacity/2 @@ -655,6 +696,12 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) ASSERT_EQ(a4[0].length, _1m); ASSERT_EQ(a4[1].offset, capacity / 2); ASSERT_EQ(a4[1].length, 2 * _1m); + bins_overall.clear(); + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall.size(), 3u); + ASSERT_EQ(bins_overall[0], 1u); + 
ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u); } { // release allocated 1M in the second meg chunk except @@ -662,15 +709,27 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) interval_vector_t r; r.emplace_back(_1m + 0x1000, _1m); al2.free_l2(r); + bins_overall.clear(); + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall.size(), 3u); + ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u); } { // release 2M @(capacity / 2) interval_vector_t r; r.emplace_back(capacity / 2, 2 * _1m); al2.free_l2(r); + bins_overall.clear(); + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall.size(), 3u); + ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((num_chunks) / 2) - 1], 1u); } { - // allocate 3x512K - should go to the second halves of + // allocate 4x512K - should go to the second halves of // the first and second 1M chunks and @(capacity / 2) uint64_t allocated4 = 0; interval_vector_t a4; @@ -683,18 +742,42 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) ASSERT_EQ(a4[1].length, _1m / 2); ASSERT_EQ(a4[2].offset, capacity / 2); ASSERT_EQ(a4[2].length, _1m); + + bins_overall.clear(); + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall.size(), 3u); + ASSERT_EQ(bins_overall[0], 1u); + // below we have 512K - 4K & 512K - 12K chunks which both fit into + // the same bin = 6 + ASSERT_EQ(bins_overall[6], 2u); + ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u); + } { // cleanup first 2M except except the last 4K chunk interval_vector_t r; r.emplace_back(0, 2 * _1m - 0x1000); al2.free_l2(r); + bins_overall.clear(); + al2.collect_stats(bins_overall); + + ASSERT_EQ(bins_overall.size(), 3u); + 
ASSERT_EQ(bins_overall[0], 1u); + ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u); } { // release 2M @(capacity / 2) interval_vector_t r; r.emplace_back(capacity / 2, 2 * _1m); al2.free_l2(r); + bins_overall.clear(); + al2.collect_stats(bins_overall); + + ASSERT_EQ(bins_overall.size(), 3u); + ASSERT_EQ(bins_overall[0], 1u); + ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u); + ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u); } { // allocate 132M using 4M granularity should go to (capacity / 2) @@ -704,12 +787,20 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) ASSERT_EQ(a4.size(), 1u); ASSERT_EQ(a4[0].offset, capacity / 2); ASSERT_EQ(a4[0].length, 132 * _1m); + + bins_overall.clear(); + al2.collect_stats(bins_overall); + ASSERT_EQ(bins_overall.size(), 3u); } { // cleanup left 4K chunk in the first 2M interval_vector_t r; r.emplace_back(2 * _1m - 0x1000, 0x1000); al2.free_l2(r); + bins_overall.clear(); + al2.collect_stats(bins_overall); + + ASSERT_EQ(bins_overall.size(), 2u); } { // release 132M @(capacity / 2) @@ -760,6 +851,6 @@ TEST(TestAllocatorLevel01, test_l2_contiguous_alignment) } } - std::cout << "Done L2 cont aligned" << std::endl; } +