From: Igor Fedotov
Date: Wed, 7 Feb 2018 18:43:48 +0000 (+0300)
Subject: os/bluestore: cleanup around ExtentList class.
X-Git-Tag: wip-pdonnell-testing-20180317.202121~333^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=2d78999451c50b3ed9d77c118c1ce0ec9110772d;p=ceph-ci.git

os/bluestore: cleanup around ExtentList class.

Also move it out of bluestore_types.h.

Signed-off-by: Igor Fedotov
---
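For reference, a minimal usage sketch of the relocated class, assuming only the
definitions added in this patch; the example() wrapper, the 4096-byte block size
and the 64 KiB cap below are illustrative and not part of the change:

  #include "os/bluestore/BitAllocator.h"

  // AllocatorExtentList records allocator results into a caller-owned
  // PExtentVector, merging runs that are contiguous on disk as long as the
  // merged extent stays within the optional max_alloc_size cap.
  void example()
  {
    PExtentVector extents;
    AllocatorExtentList block_list(&extents, 4096, 65536);

    block_list.add_extents(0, 4);    // blocks [0,4)  -> pextent {0, 16384}
    block_list.add_extents(4, 4);    // contiguous    -> merged to {0, 32768}
    block_list.add_extents(100, 1);  // gap           -> new pextent {409600, 4096}

    // get_nth_extent() reports offset/length back in block units:
    auto e0 = block_list.get_nth_extent(0);  // {0, 8}
    auto e1 = block_list.get_nth_extent(1);  // {100, 1}
    // get_extent_count() == 2 at this point.
  }

The StupidAllocator hunk below drops the helper entirely and performs the same
contiguous-merge check directly on the PExtentVector.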
diff --git a/src/os/bluestore/BitAllocator.cc b/src/os/bluestore/BitAllocator.cc
index 03ac942c889..203be748509 100644
--- a/src/os/bluestore/BitAllocator.cc
+++ b/src/os/bluestore/BitAllocator.cc
@@ -41,7 +41,28 @@ int64_t BitMapAreaLeaf::count = 0;
 int64_t BitMapZone::count = 0;
 int64_t BitMapZone::total_blocks = 0;

+void AllocatorExtentList::add_extents(int64_t start, int64_t count)
+{
+  bluestore_pextent_t *last_extent = NULL;
+  bool can_merge = false;
+
+  if (!m_extents->empty()) {
+    last_extent = &(m_extents->back());
+    uint64_t last_offset = last_extent->end() / m_block_size;
+    uint32_t last_length = last_extent->length / m_block_size;
+    if ((last_offset == (uint64_t) start) &&
+        (!m_max_blocks || (last_length + count) <= m_max_blocks)) {
+      can_merge = true;
+    }
+  }
+  if (can_merge) {
+    last_extent->length += (count * m_block_size);
+  } else {
+    m_extents->emplace_back(bluestore_pextent_t(start * m_block_size,
+                                                count * m_block_size));
+  }
+}

 int64_t BmapEntityListIter::index()
 {
@@ -425,7 +446,7 @@ int64_t BitMapZone::alloc_blocks_dis(int64_t num_blocks, int64_t min_alloc,
                     int64_t hint, int64_t zone_blk_off,
-                    ExtentList *alloc_blocks)
+                    AllocatorExtentList *alloc_blocks)
 {
   int64_t bmap_idx = hint / BmapEntry::size();
   int bit = hint % BmapEntry::size();
@@ -786,7 +807,7 @@ bool BitMapAreaIN::is_allocated(int64_t start_block, int64_t num_blocks)
 }

 int64_t BitMapAreaIN::alloc_blocks_dis_int_work(bool wrap, int64_t num_blocks, int64_t min_alloc,
-                 int64_t hint, int64_t area_blk_off, ExtentList *block_list)
+                 int64_t hint, int64_t area_blk_off, AllocatorExtentList *block_list)
 {
   BitMapArea *child = NULL;
   int64_t allocated = 0;
@@ -815,14 +836,14 @@ int64_t BitMapAreaIN::alloc_blocks_dis_int_work(bool wrap, int64_t num_blocks, i
 }

 int64_t BitMapAreaIN::alloc_blocks_dis_int(int64_t num_blocks, int64_t min_alloc,
-                 int64_t hint, int64_t area_blk_off, ExtentList *block_list)
+                 int64_t hint, int64_t area_blk_off, AllocatorExtentList *block_list)
 {
   return alloc_blocks_dis_int_work(false, num_blocks, min_alloc, hint, area_blk_off, block_list);
 }

 int64_t BitMapAreaIN::alloc_blocks_dis(int64_t num_blocks, int64_t min_alloc,
-                 int64_t hint, int64_t blk_off, ExtentList *block_list)
+                 int64_t hint, int64_t blk_off, AllocatorExtentList *block_list)
 {
   int64_t allocated = 0;
@@ -1010,7 +1031,7 @@ inline bool BitMapAreaLeaf::child_check_n_lock(BitMapZone* const child,
 }

 int64_t BitMapAreaLeaf::alloc_blocks_dis_int(int64_t num_blocks, int64_t min_alloc,
-                 int64_t hint, int64_t area_blk_off, ExtentList *block_list)
+                 int64_t hint, int64_t area_blk_off, AllocatorExtentList *block_list)
 {
   BitMapZone* child = nullptr;
   int64_t allocated = 0;
@@ -1302,20 +1323,20 @@ void BitAllocator::set_blocks_used(int64_t start_block, int64_t num_blocks)
  * Allocate N dis-contiguous blocks.
  */
 int64_t BitAllocator::alloc_blocks_dis_int(int64_t num_blocks, int64_t min_alloc,
-                 int64_t hint, int64_t area_blk_off, ExtentList *block_list)
+                 int64_t hint, int64_t area_blk_off, AllocatorExtentList *block_list)
 {
   return alloc_blocks_dis_int_work(true, num_blocks, min_alloc, hint, area_blk_off, block_list);
 }

 int64_t BitAllocator::alloc_blocks_dis_res(int64_t num_blocks, int64_t min_alloc,
-                 int64_t hint, ExtentList *block_list)
+                 int64_t hint, AllocatorExtentList *block_list)
 {
   return alloc_blocks_dis_work(num_blocks, min_alloc, hint, block_list, true);
 }

 int64_t BitAllocator::alloc_blocks_dis_work(int64_t num_blocks, int64_t min_alloc,
-                 int64_t hint, ExtentList *block_list, bool reserved)
+                 int64_t hint, AllocatorExtentList *block_list, bool reserved)
 {
   int scans = 1;
   int64_t allocated = 0;
@@ -1375,7 +1396,7 @@ exit:
   return allocated;
 }

-bool BitAllocator::is_allocated_dis(ExtentList *blocks, int64_t num_blocks)
+bool BitAllocator::is_allocated_dis(AllocatorExtentList *blocks, int64_t num_blocks)
 {
   int64_t count = 0;
   for (int64_t j = 0; j < blocks->get_extent_count(); j++) {
@@ -1390,7 +1411,7 @@ bool BitAllocator::is_allocated_dis(ExtentList *blocks, int64_t num_blocks)
   return true;
 }

-void BitAllocator::free_blocks_dis(int64_t num_blocks, ExtentList *block_list)
+void BitAllocator::free_blocks_dis(int64_t num_blocks, AllocatorExtentList *block_list)
 {
   int64_t freed = 0;
   lock_shared();
diff --git a/src/os/bluestore/BitAllocator.h b/src/os/bluestore/BitAllocator.h
index 90d9e862cf6..1613059c861 100644
--- a/src/os/bluestore/BitAllocator.h
+++ b/src/os/bluestore/BitAllocator.h
@@ -16,6 +16,7 @@
 #include
 #include
 #include "include/intarith.h"
+#include "os/bluestore/Allocator.h"
 #include "os/bluestore/bluestore_types.h"

 #define alloc_assert assert
@@ -26,6 +27,49 @@
 #define alloc_dbg_assert(x) (static_cast<void> (0))
 #endif

+class AllocatorExtentList {
+  PExtentVector *m_extents;
+  int64_t m_block_size;
+  int64_t m_max_blocks;
+
+public:
+  void init(PExtentVector *extents, int64_t block_size,
+            uint64_t max_alloc_size) {
+    m_extents = extents;
+    m_block_size = block_size;
+    m_max_blocks = max_alloc_size / block_size;
+    assert(m_extents->empty());
+  }
+
+  AllocatorExtentList(PExtentVector *extents, int64_t block_size) {
+    init(extents, block_size, 0);
+  }
+
+  AllocatorExtentList(PExtentVector *extents, int64_t block_size,
+                      uint64_t max_alloc_size) {
+    init(extents, block_size, max_alloc_size);
+  }
+
+  void reset() {
+    m_extents->clear();
+  }
+
+  void add_extents(int64_t start, int64_t count);
+
+  PExtentVector *get_extents() {
+    return m_extents;
+  }
+
+  std::pair<int64_t, int64_t> get_nth_extent(int index) {
+    return std::make_pair
+      ((*m_extents)[index].offset / m_block_size,
+       (*m_extents)[index].length / m_block_size);
+  }
+
+  int64_t get_extent_count() {
+    return m_extents->size();
+  }
+};

 class BitAllocatorStats {
 public:
@@ -233,7 +277,7 @@ public:
   virtual void shutdown() = 0;
   virtual int64_t alloc_blocks_dis(int64_t num_blocks, int64_t min_alloc,
-                 int64_t hint, int64_t blk_off, ExtentList *block_list) {
+                 int64_t hint, int64_t blk_off, AllocatorExtentList *block_list) {
     ceph_abort();
     return 0;
   }
@@ -375,7 +419,7 @@ public:
   ~BitMapZone() override;
   void shutdown() override;
   int64_t alloc_blocks_dis(int64_t num_blocks, int64_t min_alloc, int64_t hint,
-                 int64_t blk_off, ExtentList *block_list) override;
+                 int64_t blk_off, AllocatorExtentList *block_list) override;
   void set_blocks_used(int64_t start_block, int64_t num_blocks) override;
   void free_blocks(int64_t start_block, int64_t num_blocks) override;
@@ -421,7 +465,7 @@ protected:
                int64_t zone_size_block, bool def);
   int64_t alloc_blocks_dis_int_work(bool wrap, int64_t num_blocks, int64_t min_alloc, int64_t hint,
-                 int64_t blk_off, ExtentList *block_list);
+                 int64_t blk_off, AllocatorExtentList *block_list);
   int64_t alloc_blocks_int_work(bool wait, bool wrap, int64_t num_blocks, int64_t hint, int64_t *start_block);
@@ -448,9 +492,9 @@ public:
   using BitMapArea::alloc_blocks_dis; //non-wait version
   virtual int64_t alloc_blocks_dis_int(int64_t num_blocks, int64_t min_alloc, int64_t hint,
-                 int64_t blk_off, ExtentList *block_list);
+                 int64_t blk_off, AllocatorExtentList *block_list);
   int64_t alloc_blocks_dis(int64_t num_blocks, int64_t min_alloc, int64_t hint,
-                 int64_t blk_off, ExtentList *block_list) override;
+                 int64_t blk_off, AllocatorExtentList *block_list) override;
   virtual void set_blocks_used_int(int64_t start_block, int64_t num_blocks);
   void set_blocks_used(int64_t start_block, int64_t num_blocks) override;
@@ -484,7 +528,7 @@ public:
   int64_t alloc_blocks_int(int64_t num_blocks, int64_t hint, int64_t *start_block);
   int64_t alloc_blocks_dis_int(int64_t num_blocks, int64_t min_alloc, int64_t hint,
-                 int64_t blk_off, ExtentList *block_list) override;
+                 int64_t blk_off, AllocatorExtentList *block_list) override;
   void free_blocks_int(int64_t start_block, int64_t num_blocks) override;

   ~BitMapAreaLeaf() override;
@@ -526,10 +570,10 @@ private:
   bool check_input_dis(int64_t num_blocks);
   void init_check(int64_t total_blocks, int64_t zone_size_block,
                   bmap_alloc_mode_t mode, bool def, bool stats_on);
-  int64_t alloc_blocks_dis_work(int64_t num_blocks, int64_t min_alloc, int64_t hint, ExtentList *block_list, bool reserved);
+  int64_t alloc_blocks_dis_work(int64_t num_blocks, int64_t min_alloc, int64_t hint, AllocatorExtentList *block_list, bool reserved);

   int64_t alloc_blocks_dis_int(int64_t num_blocks, int64_t min_alloc,
-                 int64_t hint, int64_t area_blk_off, ExtentList *block_list) override;
+                 int64_t hint, int64_t area_blk_off, AllocatorExtentList *block_list) override;

 public:
   MEMPOOL_CLASS_HELPERS();
@@ -548,10 +592,10 @@ public:
   void set_blocks_used(int64_t start_block, int64_t num_blocks) override;
   void unreserve_blocks(int64_t blocks);

-  int64_t alloc_blocks_dis_res(int64_t num_blocks, int64_t min_alloc, int64_t hint, ExtentList *block_list);
+  int64_t alloc_blocks_dis_res(int64_t num_blocks, int64_t min_alloc, int64_t hint, AllocatorExtentList *block_list);

-  void free_blocks_dis(int64_t num_blocks, ExtentList *block_list);
-  bool is_allocated_dis(ExtentList *blocks, int64_t num_blocks);
+  void free_blocks_dis(int64_t num_blocks, AllocatorExtentList *block_list);
+  bool is_allocated_dis(AllocatorExtentList *blocks, int64_t num_blocks);

   int64_t total_blocks() const {
     return m_total_blocks - m_extra_blocks;
diff --git a/src/os/bluestore/BitMapAllocator.cc b/src/os/bluestore/BitMapAllocator.cc
index 9c07648b5f4..90395e6712e 100644
--- a/src/os/bluestore/BitMapAllocator.cc
+++ b/src/os/bluestore/BitMapAllocator.cc
@@ -133,7 +133,7 @@ int64_t BitMapAllocator::allocate_dis(
   uint64_t want_size, uint64_t alloc_unit, uint64_t max_alloc_size,
   int64_t hint, PExtentVector *extents)
 {
-  ExtentList block_list = ExtentList(extents, m_block_size, max_alloc_size);
+  AllocatorExtentList block_list(extents, m_block_size, max_alloc_size);
   int64_t nblks = (want_size + m_block_size - 1) / m_block_size;
   int64_t num = 0;
diff --git a/src/os/bluestore/StupidAllocator.cc b/src/os/bluestore/StupidAllocator.cc
index f8ef54eb7c8..706b00ea004 100644
--- a/src/os/bluestore/StupidAllocator.cc
+++ b/src/os/bluestore/StupidAllocator.cc
@@ -219,8 +219,6 @@ int64_t StupidAllocator::allocate(
     max_alloc_size = want_size;
   }

-  ExtentList block_list = ExtentList(extents, 1, max_alloc_size);
-
   while (allocated_size < want_size) {
     res = allocate_int(std::min(max_alloc_size, (want_size - allocated_size)),
                        alloc_unit, hint, &offset, &length);
@@ -230,7 +228,19 @@ int64_t StupidAllocator::allocate(
        */
       break;
     }
-    block_list.add_extents(offset, length);
+    bool can_append = true;
+    if (!extents->empty()) {
+      bluestore_pextent_t &last_extent = extents->back();
+      if ((last_extent.end() == offset) &&
+          ((last_extent.length + length) <= max_alloc_size)) {
+        can_append = false;
+        last_extent.length += length;
+      }
+    }
+    if (can_append) {
+      extents->emplace_back(bluestore_pextent_t(offset, length));
+    }
+
     allocated_size += length;
     hint = offset + length;
   }
diff --git a/src/os/bluestore/bluestore_types.cc b/src/os/bluestore/bluestore_types.cc
index 32bcfa8db39..77a39882609 100644
--- a/src/os/bluestore/bluestore_types.cc
+++ b/src/os/bluestore/bluestore_types.cc
@@ -17,28 +17,6 @@
 #include "common/Checksummer.h"
 #include "include/stringify.h"

-void ExtentList::add_extents(int64_t start, int64_t count) {
-  bluestore_pextent_t *last_extent = NULL;
-  bool can_merge = false;
-
-  if (!m_extents->empty()) {
-    last_extent = &(m_extents->back());
-    uint64_t last_offset = last_extent->end() / m_block_size;
-    uint32_t last_length = last_extent->length / m_block_size;
-    if ((last_offset == (uint64_t) start) &&
-        (!m_max_blocks || (last_length + count) <= m_max_blocks)) {
-      can_merge = true;
-    }
-  }
-
-  if (can_merge) {
-    last_extent->length += (count * m_block_size);
-  } else {
-    m_extents->emplace_back(bluestore_pextent_t(start * m_block_size,
-                                                count * m_block_size));
-  }
-}
-
 // bluestore_bdev_label_t

 void bluestore_bdev_label_t::encode(bufferlist& bl) const
diff --git a/src/os/bluestore/bluestore_types.h b/src/os/bluestore/bluestore_types.h
index b7df3db81ab..66d68ab2fcc 100644
--- a/src/os/bluestore/bluestore_types.h
+++ b/src/os/bluestore/bluestore_types.h
@@ -134,51 +134,6 @@ struct denc_traits {
   }
 };

-
-class ExtentList {
-  PExtentVector *m_extents;
-  int64_t m_block_size;
-  int64_t m_max_blocks;
-
-public:
-  void init(PExtentVector *extents, int64_t block_size,
-            uint64_t max_alloc_size) {
-    m_extents = extents;
-    m_block_size = block_size;
-    m_max_blocks = max_alloc_size / block_size;
-    assert(m_extents->empty());
-  }
-
-  ExtentList(PExtentVector *extents, int64_t block_size) {
-    init(extents, block_size, 0);
-  }
-
-  ExtentList(PExtentVector *extents, int64_t block_size,
-             uint64_t max_alloc_size) {
-    init(extents, block_size, max_alloc_size);
-  }
-
-  void reset() {
-    m_extents->clear();
-  }
-
-  void add_extents(int64_t start, int64_t count);
-
-  PExtentVector *get_extents() {
-    return m_extents;
-  }
-
-  std::pair<int64_t, int64_t> get_nth_extent(int index) {
-    return std::make_pair
-      ((*m_extents)[index].offset / m_block_size,
-       (*m_extents)[index].length / m_block_size);
-  }
-
-  int64_t get_extent_count() {
-    return m_extents->size();
-  }
-};
-
 /// extent_map: a map of reference counted extents
 struct bluestore_extent_ref_map_t {
   struct record_t {
diff --git a/src/test/objectstore/BitAllocator_test.cc b/src/test/objectstore/BitAllocator_test.cc
index 8858885583d..caaa2e9a8b8 100644
--- a/src/test/objectstore/BitAllocator_test.cc
+++ b/src/test/objectstore/BitAllocator_test.cc
@@ -284,22 +284,22 @@ TEST(BitAllocator, test_zone_alloc)
   int64_t blk_size = 1024;
   PExtentVector extents;
-  std::unique_ptr<ExtentList> block_list = std::make_unique<ExtentList>(&extents, blk_size);
-  allocated = zone->alloc_blocks_dis(zone->size() / 2, 1, 0, 0, block_list.get());
+  AllocatorExtentList block_list(&extents, blk_size);
+  allocated = zone->alloc_blocks_dis(zone->size() / 2, 1, 0, 0, &block_list);
   bmap_test_assert(allocated == zone->size() / 2);

   {
     int64_t blk_size = 1024;
     PExtentVector extents;
-    std::unique_ptr<ExtentList> block_list = std::make_unique<ExtentList>(&extents, blk_size);
+    AllocatorExtentList block_list(&extents, blk_size);
     zone = std::make_unique<BitMapZone>(g_ceph_context, total_blocks, 0);
     lock = zone->lock_excl_try();
     bmap_test_assert(lock);
     for (int i = 0; i < zone->size(); i += 4) {
-      block_list->reset();
-      allocated = zone->alloc_blocks_dis(1, 1, i, 0, block_list.get());
+      block_list.reset();
+      allocated = zone->alloc_blocks_dis(1, 1, i, 0, &block_list);
       bmap_test_assert(allocated == 1);
       EXPECT_EQ(extents[0].offset, (uint64_t) i * blk_size);
     }
@@ -319,24 +319,23 @@ TEST(BitAllocator, test_zone_alloc)
   for (int i = 1; i <= total_blocks - BmapEntry::size(); i = i << 1) {
     for (int64_t j = 0; j <= BmapEntry::size(); j = 1 << j) {
       extents.clear();
-      ExtentList *block_list = new ExtentList(&extents, blk_size);
+      AllocatorExtentList block_list(&extents, blk_size);
       zone = std::make_unique<BitMapZone>(g_ceph_context, total_blocks, 0);
       lock = zone->lock_excl_try();
       bmap_test_assert(lock);
-      block_list->reset();
+      block_list.reset();
       int64_t need_blks = (((total_blocks - j) / i) * i);
-      allocated = zone->alloc_blocks_dis(need_blks, i, j, 0, block_list);
+      allocated = zone->alloc_blocks_dis(need_blks, i, j, 0, &block_list);
       bmap_test_assert(allocated == need_blks);
       bmap_test_assert(extents[0].offset == (uint64_t) j);
-      delete block_list;
     }
   }

   //allocation in loop
   {
     extents.clear();
-    ExtentList *block_list = new ExtentList(&extents, blk_size);
+    AllocatorExtentList block_list(&extents, blk_size);
     zone = std::make_unique<BitMapZone>(g_ceph_context, total_blocks, 0);
     lock = zone->lock_excl_try();

@@ -344,17 +343,17 @@
     for (int i = 1; i <= total_blocks; i = i << 1) {
       for (int j = 0; j < total_blocks; j +=i) {
         bmap_test_assert(lock);
-        block_list->reset();
+        block_list.reset();
         int64_t need_blks = i;
-        allocated = zone->alloc_blocks_dis(need_blks, i, 0, 0, block_list);
+        allocated = zone->alloc_blocks_dis(need_blks, i, 0, 0, &block_list);
         bmap_test_assert(allocated == need_blks);
         bmap_test_assert(extents[0].offset == (uint64_t) j);
-        block_list->reset();
+        block_list.reset();
       }
       {
-        allocated = zone->alloc_blocks_dis(1, 1, 0, 0, block_list);
+        allocated = zone->alloc_blocks_dis(1, 1, 0, 0, &block_list);
         bmap_test_assert(allocated == 0);
-        block_list->reset();
+        block_list.reset();
       }

       for (int j = 0; j < total_blocks; j +=i) {
@@ -362,34 +361,33 @@
        }
      }
    }
-    delete block_list;
  }

  {
    extents.clear();
-    std::unique_ptr<ExtentList> block_list(new ExtentList(&extents, blk_size));
+    AllocatorExtentList block_list(&extents, blk_size);
    zone = std::make_unique<BitMapZone>(g_ceph_context, total_blocks, 0);
    lock = zone->lock_excl_try();
    bmap_test_assert(lock);

-    block_list->reset();
-    allocated = zone->alloc_blocks_dis(total_blocks + 1, total_blocks + 1, 0, 1024, block_list.get());
+    block_list.reset();
+    allocated = zone->alloc_blocks_dis(total_blocks + 1, total_blocks + 1, 0, 1024, &block_list);
    bmap_test_assert(allocated == 0);

-    block_list->reset();
-    allocated = zone->alloc_blocks_dis(total_blocks, total_blocks, 1, 1024, block_list.get());
+    block_list.reset();
+    allocated = zone->alloc_blocks_dis(total_blocks, total_blocks, 1, 1024, &block_list);
    bmap_test_assert(allocated == 0);

-    block_list->reset();
-    allocated = zone->alloc_blocks_dis(total_blocks, total_blocks, 0, 0, block_list.get());
+    block_list.reset();
+    allocated = zone->alloc_blocks_dis(total_blocks, total_blocks, 0, 0, &block_list);
    bmap_test_assert(allocated == total_blocks);
    bmap_test_assert(extents[0].offset == 0);

    zone->free_blocks(extents[0].offset, allocated);

    extents.clear();
-    block_list = std::make_unique<ExtentList>(&extents, blk_size, total_blocks / 4 * blk_size);
-    allocated = zone->alloc_blocks_dis(total_blocks, total_blocks / 4, 0, 0, block_list.get());
+    block_list = AllocatorExtentList(&extents, blk_size, total_blocks / 4 * blk_size);
+    allocated = zone->alloc_blocks_dis(total_blocks, total_blocks / 4, 0, 0, &block_list);
    bmap_test_assert(allocated == total_blocks);
    for (int i = 0; i < 4; i++) {
      bmap_test_assert(extents[i].offset == (uint64_t) i * (total_blocks / 4));
@@ -426,18 +424,18 @@ TEST(BitAllocator, test_bmap_alloc)
   for (int64_t j = 0; alloc_size <= total_blocks; j++) {
     int64_t blk_size = 1024;
     PExtentVector extents;
-    std::unique_ptr<ExtentList> block_list = std::make_unique<ExtentList>(&extents, blk_size, alloc_size);
+    AllocatorExtentList block_list(&extents, blk_size, alloc_size);

     for (int64_t i = 0; i < total_blocks; i += alloc_size) {
       bmap_test_assert(alloc->reserve_blocks(alloc_size) == true);
       allocated = alloc->alloc_blocks_dis_res(alloc_size, std::min(alloc_size, zone_size),
-                                              0, block_list.get());
+                                              0, &block_list);
       bmap_test_assert(alloc_size == allocated);
-      bmap_test_assert(block_list->get_extent_count() ==
+      bmap_test_assert(block_list.get_extent_count() ==
                        (alloc_size > zone_size? alloc_size / zone_size: 1));
       bmap_test_assert(extents[0].offset == (uint64_t) i * blk_size);
       bmap_test_assert((int64_t) extents[0].length == ((alloc_size > zone_size? zone_size: alloc_size) * blk_size));
-      block_list->reset();
+      block_list.reset();
     }
     for (int64_t i = 0; i < total_blocks; i += alloc_size) {
       alloc->free_blocks(i, alloc_size);
@@ -449,27 +447,26 @@
   int64_t blk_size = 1024;
   PExtentVector extents;

-  ExtentList *block_list = new ExtentList(&extents, blk_size);
+  AllocatorExtentList block_list(&extents, blk_size);

   ASSERT_EQ(alloc->reserve_blocks(alloc->size() / 2), true);
-  allocated = alloc->alloc_blocks_dis_res(alloc->size()/2, 1, 0, block_list);
+  allocated = alloc->alloc_blocks_dis_res(alloc->size()/2, 1, 0, &block_list);
   ASSERT_EQ(alloc->size()/2, allocated);

-  block_list->reset();
+  block_list.reset();
   ASSERT_EQ(alloc->reserve_blocks(1), true);
-  allocated = alloc->alloc_blocks_dis_res(1, 1, 0, block_list);
+  allocated = alloc->alloc_blocks_dis_res(1, 1, 0, &block_list);
   bmap_test_assert(allocated == 1);

   alloc->free_blocks(alloc->size()/2, 1);

-  block_list->reset();
+  block_list.reset();
   ASSERT_EQ(alloc->reserve_blocks(1), true);
-  allocated = alloc->alloc_blocks_dis_res(1, 1, 0, block_list);
+  allocated = alloc->alloc_blocks_dis_res(1, 1, 0, &block_list);
   bmap_test_assert(allocated == 1);
   bmap_test_assert((int64_t) extents[0].offset == alloc->size()/2 * blk_size);

-  delete block_list;
   delete alloc;
 }

@@ -490,10 +487,10 @@ bool alloc_extents_max_block(BitAllocator *alloc,
   int64_t count = 0;
   PExtentVector extents;

-  std::unique_ptr<ExtentList> block_list = std::make_unique<ExtentList>(&extents, blk_size, max_alloc);
+  AllocatorExtentList block_list(&extents, blk_size, max_alloc);

   EXPECT_EQ(alloc->reserve_blocks(total_alloc), true);
-  allocated = alloc->alloc_blocks_dis_res(total_alloc, blk_size, 0, block_list.get());
+  allocated = alloc->alloc_blocks_dis_res(total_alloc, blk_size, 0, &block_list);
   EXPECT_EQ(allocated, total_alloc);

   max_alloc = total_alloc > max_alloc? max_alloc: total_alloc;
@@ -534,16 +531,16 @@ do_work_dis(BitAllocator *alloc)
   int64_t num_blocks = alloc->size() / NUM_THREADS;

   PExtentVector extents;
-  std::unique_ptr<ExtentList> block_list = std::make_unique<ExtentList>(&extents, 4096);
+  AllocatorExtentList block_list(&extents, 4096);

   while (num_iters--) {
     alloc_assert(alloc->reserve_blocks(num_blocks));
-    alloced = alloc->alloc_blocks_dis_res(num_blocks, 1, 0, block_list.get());
+    alloced = alloc->alloc_blocks_dis_res(num_blocks, 1, 0, &block_list);
     alloc_assert(alloced == num_blocks);

-    alloc_assert(alloc->is_allocated_dis(block_list.get(), num_blocks));
-    alloc->free_blocks_dis(num_blocks, block_list.get());
-    block_list.get()->reset();
+    alloc_assert(alloc->is_allocated_dis(&block_list, num_blocks));
+    alloc->free_blocks_dis(num_blocks, &block_list);
+    block_list.reset();
   }
 }