Option("bluefs_allocator", Option::TYPE_STR, Option::LEVEL_DEV)
.set_default("bitmap")
+ .set_enum_allowed({"bitmap", "stupid", "avl", "hybrid"})
.set_description(""),
Option("bluefs_preextend_wal_files", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
Option("bluestore_allocator", Option::TYPE_STR, Option::LEVEL_ADVANCED)
.set_default("bitmap")
- .set_enum_allowed({"bitmap", "stupid", "avl"})
+ .set_enum_allowed({"bitmap", "stupid", "avl", "hybrid"})
.set_description("Allocator policy")
.set_long_description("Allocator to use for bluestore. Stupid should only be used for testing."),
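With the two option hunks above in place, the new policy can be selected through the usual configuration path. An illustrative ceph.conf fragment (editor's example, not part of the patch):

[osd]
bluefs allocator = hybrid
bluestore allocator = hybrid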
bluestore/StupidAllocator.cc
bluestore/BitmapAllocator.cc
bluestore/AvlAllocator.cc
+ bluestore/HybridAllocator.cc
)
endif(WITH_BLUESTORE)
#include "StupidAllocator.h"
#include "BitmapAllocator.h"
#include "AvlAllocator.h"
+#include "HybridAllocator.h"
#include "common/debug.h"
#include "common/admin_socket.h"
#define dout_subsys ceph_subsys_bluestore
class Allocator::SocketHook : public AdminSocketHook {
Allocator *alloc;
+ friend class Allocator;
std::string name;
public:
explicit SocketHook(Allocator *alloc,
delete asok_hook;
}
+const string& Allocator::get_name() const {
+ return asok_hook->name;
+}
Allocator *Allocator::create(CephContext* cct, string type,
int64_t size, int64_t block_size, const std::string& name)
alloc = new BitmapAllocator(cct, size, block_size, name);
} else if (type == "avl") {
return new AvlAllocator(cct, size, block_size, name);
+ } else if (type == "hybrid") {
+ return new HybridAllocator(cct, size, block_size, 1024 /*FIXME*/, name);
}
if (alloc == nullptr) {
lderr(cct) << "Allocator::" << __func__ << " unknown alloc type "
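As a quick illustration of the factory path above (an editor's sketch, not part of the patch; cct, the capacity, block size, and name are placeholders), the new type plugs into the existing Allocator interface unchanged:

#include "os/bluestore/Allocator.h"

// Sketch: carve 16 KiB out of a hybrid allocator over a 1 GiB device.
void hybrid_sketch(CephContext* cct) {
  Allocator* a = Allocator::create(cct, "hybrid",
                                   1ULL << 30 /* capacity */,
                                   0x1000 /* block size */,
                                   "sketch");
  a->init_add_free(0, 1ULL << 30);   // mark the whole device free
  PExtentVector extents;
  int64_t got = a->allocate(0x4000, 0x1000, 0, 0, &extents);
  ceph_assert(got == 0x4000);        // bytes actually allocated
  a->shutdown();
  delete a;
}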
static Allocator *create(CephContext* cct, string type, int64_t size,
int64_t block_size, const std::string& name = "");
+
+ const string& get_name() const;
+
private:
class SocketHook;
SocketHook* asok_hook = nullptr;
void release(
const interval_set<uint64_t>& release_set) override;
+ using Allocator::release;
+
uint64_t get_free() override
{
return get_available();
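A side note on the using-declaration added above: without it, overriding release(const interval_set<uint64_t>&) would hide the base class's release(const PExtentVector&) convenience overload from callers. A minimal standalone illustration of this C++ name-hiding rule (toy types, not Ceph code):

struct Base {
  void f(int)    {}
  void f(double) {}
};
struct Hiding : Base {
  void f(int) {}     // hides *all* Base::f overloads, including f(double)
};
struct Exposing : Base {
  using Base::f;     // re-exposes the hidden overloads
  void f(int) {}
};
// Hiding().f(1.5);   // resolves to f(int) via implicit conversion
// Exposing().f(1.5); // resolves to Base::f(double) as intended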
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "HybridAllocator.h"
+
+#include <limits>
+
+#include "common/config_proxy.h"
+#include "common/debug.h"
+
+#define dout_context cct
+#define dout_subsys ceph_subsys_bluestore
+#undef dout_prefix
+#define dout_prefix *_dout << "HybridAllocator "
+
+
+int64_t HybridAllocator::allocate(
+ uint64_t want,
+ uint64_t unit,
+ uint64_t max_alloc_size,
+ int64_t hint,
+ PExtentVector* extents)
+{
+ ldout(cct, 10) << __func__ << std::hex
+ << " want 0x" << want
+ << " unit 0x" << unit
+ << " max_alloc_size 0x" << max_alloc_size
+ << " hint 0x" << hint
+ << std::dec << dendl;
+ ceph_assert(isp2(unit));
+ ceph_assert(want % unit == 0);
+
+ if (max_alloc_size == 0) {
+ max_alloc_size = want;
+ }
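+  // cap max_alloc_size at the largest length a bluestore_pextent_t can
+  // represent, rounded down to the block size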
+ if (constexpr auto cap = std::numeric_limits<decltype(bluestore_pextent_t::length)>::max();
+ max_alloc_size >= cap) {
+ max_alloc_size = p2align(uint64_t(cap), (uint64_t)get_block_size());
+ }
+
+ std::lock_guard l(lock);
+
+ int64_t res;
+ PExtentVector local_extents;
+
+ // preserve original 'extents' vector state
+ auto orig_size = extents->size();
+ auto orig_pos = extents->end();
+ if (orig_size) {
+ --orig_pos;
+ }
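+  // (orig_pos now points at the last pre-existing extent, if any; on a
+  // rollback below, ++orig_pos is the first extent appended by this call)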
+
+  // try bitmap first to avoid an unneeded split of a contiguous extent
+  // when the amount wanted is smaller than the shortest range in the AVL tree
+ if (bmap_alloc && bmap_alloc->get_free() &&
+ want < _lowest_size_available()) {
+ res = bmap_alloc->allocate(want, unit, max_alloc_size, hint, extents);
+ if (res < 0) {
+      // on failure, release whatever was already allocated and
+      // restart the allocation from the AVL allocator
+ if (orig_size) {
+ local_extents.insert(
+ local_extents.end(), ++orig_pos, extents->end());
+ extents->resize(orig_size);
+ } else {
+ extents->swap(local_extents);
+ }
+ bmap_alloc->release(local_extents);
+ res = 0;
+ }
+ if ((uint64_t)res < want) {
+ auto res2 = _allocate(want - res, unit, max_alloc_size, hint, extents);
+ if (res2 < 0) {
+ res = res2; // caller to do the release
+ } else {
+ res += res2;
+ }
+ }
+ } else {
+ res = _allocate(want, unit, max_alloc_size, hint, extents);
+ if (res < 0) {
+      // on failure, release whatever was already allocated and
+      // restart the allocation from the bitmap allocator
+ if (orig_size) {
+ local_extents.insert(
+ local_extents.end(), ++orig_pos, extents->end());
+ extents->resize(orig_size);
+ } else {
+ extents->swap(local_extents);
+ }
+ _release(local_extents);
+ res = 0;
+ }
+    if ((uint64_t)res < want) {
+      auto res2 = bmap_alloc ?
+        bmap_alloc->allocate(want - res, unit, max_alloc_size, hint, extents) :
+        0;
+      if (res2 < 0) {
+ res = res2; // caller to do the release
+ } else {
+ res += res2;
+ }
+ }
+ }
+ return res ? res : -ENOSPC;
+}
+
+void HybridAllocator::release(const interval_set<uint64_t>& release_set) {
+ std::lock_guard l(lock);
+  // this will attempt to return the freed ranges to the AvlAllocator first
+  // and fall back to the bitmap allocator via the _try_insert_range call
+ _release(release_set);
+}
+
+uint64_t HybridAllocator::get_free()
+{
+ std::lock_guard l(lock);
+ return (bmap_alloc ? bmap_alloc->get_free() : 0) + _get_free();
+}
+
+double HybridAllocator::get_fragmentation()
+{
+ std::lock_guard l(lock);
+ auto f = AvlAllocator::_get_fragmentation();
+ auto bmap_free = bmap_alloc ? bmap_alloc->get_free() : 0;
+ if (bmap_free) {
+ auto _free = _get_free() + bmap_free;
+ auto bf = bmap_alloc->get_fragmentation();
+
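+    // weight each allocator's score by its share of the total free space:
+    //   f_total = f_avl * avl_free / total_free + f_bmap * bmap_free / total_free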
+ f = f * _get_free() / _free + bf * bmap_free / _free;
+ }
+ return f;
+}
+
+void HybridAllocator::dump()
+{
+ std::lock_guard l(lock);
+ AvlAllocator::_dump();
+ if (bmap_alloc) {
+ bmap_alloc->dump();
+ }
+ ldout(cct, 0) << __func__
+ << " avl_free: " << _get_free()
+ << " bmap_free: " << (bmap_alloc ? bmap_alloc->get_free() : 0)
+ << dendl;
+}
+
+void HybridAllocator::dump(std::function<void(uint64_t offset, uint64_t length)> notify)
+{
+ AvlAllocator::dump(notify);
+ if (bmap_alloc) {
+ bmap_alloc->dump(notify);
+ }
+}
+
+void HybridAllocator::init_add_free(uint64_t offset, uint64_t length)
+{
+ std::lock_guard l(lock);
+ ldout(cct, 10) << __func__ << std::hex
+ << " offset 0x" << offset
+ << " length 0x" << length
+ << std::dec << dendl;
+ _add_to_tree(offset, length);
+}
+
+void HybridAllocator::init_rm_free(uint64_t offset, uint64_t length)
+{
+ std::lock_guard l(lock);
+ ldout(cct, 10) << __func__ << std::hex
+ << " offset 0x" << offset
+ << " length 0x" << length
+ << std::dec << dendl;
+ _try_remove_from_tree(offset, length,
+ [&](uint64_t o, uint64_t l, bool found) {
+ if (!found) {
+ if (bmap_alloc) {
+ bmap_alloc->init_rm_free(o, l);
+ } else {
+          lderr(cct) << "init_rm_free lambda " << std::hex
+                     << "unexpected extent: 0x" << o << "~" << l
+                     << std::dec << dendl;
+ ceph_assert(false);
+ }
+ }
+ });
+}
+
+void HybridAllocator::shutdown()
+{
+ std::lock_guard l(lock);
+ _shutdown();
+ if (bmap_alloc) {
+ bmap_alloc->shutdown();
+ delete bmap_alloc;
+ bmap_alloc = nullptr;
+ }
+}
+
+void HybridAllocator::_spillover_range(uint64_t start, uint64_t end) {
+ auto size = end - start;
+ dout(20) << __func__
+ << std::hex << " "
+ << start << "~" << size
+ << std::dec
+ << dendl;
+ ceph_assert(size);
+ if (!bmap_alloc) {
+ dout(1) << __func__
+ << std::hex
+ << " constructing fallback allocator"
+ << dendl;
+ bmap_alloc = new BitmapAllocator(cct,
+ get_capacity(),
+ get_block_size(),
+ get_name());
+ }
+ bmap_alloc->init_add_free(start, size);
+}
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+
+#include <mutex>
+
+#include "AvlAllocator.h"
+#include "BitmapAllocator.h"
+
+class HybridAllocator : public AvlAllocator {
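+  // fallback allocator; instantiated lazily by _spillover_range() on the
+  // first spill out of the AVL tree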
+ BitmapAllocator* bmap_alloc = nullptr;
+public:
+ HybridAllocator(CephContext* cct, int64_t device_size, int64_t _block_size,
+ uint64_t max_entries,
+ const std::string& name) :
+ AvlAllocator(cct, device_size, _block_size, max_entries, name) {
+ }
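+  // want: total bytes requested; unit: allocation granularity;
+  // max_alloc_size: cap on a single returned extent (0 = no extra cap);
+  // hint: allocation hint; returns bytes allocated, or a negative error
+  // (-ENOSPC when nothing could be allocated)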
+ int64_t allocate(
+ uint64_t want,
+ uint64_t unit,
+ uint64_t max_alloc_size,
+ int64_t hint,
+ PExtentVector *extents) override;
+ void release(const interval_set<uint64_t>& release_set) override;
+ uint64_t get_free() override;
+ double get_fragmentation() override;
+
+ void dump() override;
+ void dump(std::function<void(uint64_t offset, uint64_t length)> notify) override;
+ void init_add_free(uint64_t offset, uint64_t length) override;
+ void init_rm_free(uint64_t offset, uint64_t length) override;
+ void shutdown() override;
+
+protected:
+ // intended primarily for UT
+ BitmapAllocator* get_bmap() {
+ return bmap_alloc;
+ }
+ const BitmapAllocator* get_bmap() const {
+ return bmap_alloc;
+ }
+private:
+
+ void _spillover_range(uint64_t start, uint64_t end) override;
+};
ASSERT_EQ(mempool::bluestore_alloc::allocated_items(), items);
}
-INSTANTIATE_TEST_SUITE_P(
+INSTANTIATE_TEST_CASE_P(
Allocator,
AllocTest,
- ::testing::Values("stupid", "bitmap", "avl"));
+ ::testing::Values("stupid", "bitmap", "avl", "hybrid"));
#else
#include "include/Context.h"
#include "os/bluestore/Allocator.h"
-#include <boost/random/uniform_int.hpp>
typedef boost::mt11213b gen_type;
#if GTEST_HAS_PARAM_TEST
EXPECT_EQ(0.0, alloc->get_fragmentation());
}
}
+ tmp.clear();
EXPECT_EQ(-ENOSPC, alloc->allocate(want_size, alloc_unit, 0, 0, &tmp));
if (GetParam() == string("avl")) {
// AVL allocator uses a different allocating strategy
cout << "skipping for AVL allocator";
return;
+ } else if (GetParam() == string("hybrid")) {
+    // the hybrid allocator also uses a different allocation strategy
+ cout << "skipping for Hybrid allocator";
+ return;
}
for (size_t i = 0; i < allocated.size(); i += 2)
EXPECT_EQ(need,
alloc->allocate(need, mas, 0, &extents));
need = block_size * blocks / 4; // 2GB
+ extents.clear();
EXPECT_EQ(need,
alloc->allocate(need, mas, 0, &extents));
EXPECT_TRUE(extents[0].length > 0);
EXPECT_TRUE(extents[0].length > 0);
}
+TEST_P(AllocTest, test_alloc_contiguous)
+{
+ int64_t block_size = 0x1000;
+ int64_t capacity = block_size * 1024 * 1024;
+
+ {
+ init_alloc(capacity, block_size);
+
+ alloc->init_add_free(0, capacity);
+ PExtentVector extents;
+ uint64_t need = 4 * block_size;
+ EXPECT_EQ(need,
+ alloc->allocate(need, need,
+ 0, (int64_t)0, &extents));
+ EXPECT_EQ(1u, extents.size());
+ EXPECT_EQ(extents[0].offset, 0);
+ EXPECT_EQ(extents[0].length, 4 * block_size);
+
+ extents.clear();
+ EXPECT_EQ(need,
+ alloc->allocate(need, need,
+ 0, (int64_t)0, &extents));
+ EXPECT_EQ(1u, extents.size());
+ EXPECT_EQ(extents[0].offset, 4 * block_size);
+ EXPECT_EQ(extents[0].length, 4 * block_size);
+ }
+
+ alloc->shutdown();
+}
+
INSTANTIATE_TEST_CASE_P(
Allocator,
AllocTest,
- ::testing::Values("stupid", "bitmap", "avl"));
-
+ ::testing::Values("stupid", "bitmap", "avl", "hybrid"));
#else
TEST(DummyTest, ValueParameterizedTestsAreNotSupportedOnThisPlatform) {}
add_ceph_unittest(unittest_fastbmap_allocator)
target_link_libraries(unittest_fastbmap_allocator os global)
+ set_target_properties(unittest_fastbmap_allocator PROPERTIES COMPILE_FLAGS
+ "${UNITTEST_CXX_FLAGS}")
+
+ add_executable(unittest_hybrid_allocator
+ hybrid_allocator_test.cc
+ $<TARGET_OBJECTS:unit-main>
+ )
+ add_ceph_unittest(unittest_hybrid_allocator)
+ target_link_libraries(unittest_hybrid_allocator os global)
+
+ set_target_properties(unittest_hybrid_allocator PROPERTIES COMPILE_FLAGS
+ "${UNITTEST_CXX_FLAGS}")
+
+ # unittest_bluefs
add_executable(unittest_bluefs
test_bluefs.cc
)
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include <iostream>
+#include <gtest/gtest.h>
+
+#include "os/bluestore/HybridAllocator.h"
+
+class TestHybridAllocator : public HybridAllocator {
+public:
+ TestHybridAllocator(CephContext* cct,
+ int64_t device_size,
+ int64_t _block_size,
+ uint64_t max_entries,
+ const std::string& name) :
+ HybridAllocator(cct, device_size, _block_size,
+ max_entries,
+ name) {
+ }
+
+ uint64_t get_bmap_free() {
+ return get_bmap() ? get_bmap()->get_free() : 0;
+ }
+ uint64_t get_avl_free() {
+ return AvlAllocator::get_free();
+ }
+};
+
+const uint64_t _1m = 1024 * 1024;
+const uint64_t _4m = 4 * 1024 * 1024;
+
+TEST(HybridAllocator, basic)
+{
+ {
+ uint64_t block_size = 0x1000;
+ uint64_t capacity = 0x10000 * _1m; // = 64GB
+ TestHybridAllocator ha(g_ceph_context, capacity, block_size, 4, "test_hybrid_allocator");
+
+ ASSERT_EQ(0, ha.get_free());
+ ASSERT_EQ(0, ha.get_avl_free());
+ ASSERT_EQ(0, ha.get_bmap_free());
+
+ ha.init_add_free(0, _4m);
+ ASSERT_EQ(_4m, ha.get_free());
+ ASSERT_EQ(_4m, ha.get_avl_free());
+ ASSERT_EQ(0, ha.get_bmap_free());
+
+ ha.init_add_free(2 * _4m, _4m);
+ ASSERT_EQ(_4m * 2, ha.get_free());
+ ASSERT_EQ(_4m * 2, ha.get_avl_free());
+ ASSERT_EQ(0, ha.get_bmap_free());
+
+ ha.init_add_free(100 * _4m, _4m);
+ ha.init_add_free(102 * _4m, _4m);
+
+ ASSERT_EQ(_4m * 4, ha.get_free());
+ ASSERT_EQ(_4m * 4, ha.get_avl_free());
+ ASSERT_EQ(0, ha.get_bmap_free());
+
+    // subsequent free-range additions will spill over to the bitmap
+ ha.init_add_free(4 * _4m, _4m);
+ ASSERT_EQ(_4m * 5, ha.get_free());
+ ASSERT_EQ(_4m * 4, ha.get_avl_free());
+ ASSERT_EQ(_4m * 1, ha.get_bmap_free());
+
+ ha.init_add_free(6 * _4m, _4m);
+ ASSERT_EQ(_4m * 6, ha.get_free());
+ ASSERT_EQ(_4m * 4, ha.get_avl_free());
+ ASSERT_EQ(_4m * 2, ha.get_bmap_free());
+
+ // so we have 6x4M chunks, 4 chunks at AVL and 2 at bitmap
+
+ ha.init_rm_free(_1m, _1m); // take 1M from AVL
+ ASSERT_EQ(_1m * 23, ha.get_free());
+ ASSERT_EQ(_1m * 14, ha.get_avl_free());
+ ASSERT_EQ(_1m * 9, ha.get_bmap_free());
+
+ ha.init_rm_free(6 * _4m + _1m, _1m); // take 1M from bmap
+ ASSERT_EQ(_1m * 22, ha.get_free());
+ ASSERT_EQ(_1m * 14, ha.get_avl_free());
+ ASSERT_EQ(_1m * 8, ha.get_bmap_free());
+
+    // so we have at avl: 2M~2M, 8M~4M, 400M~4M, 408M~4M
+    // and at bmap: 0~1M, 16M~1M, 18M~2M, 24M~4M
+
+ PExtentVector extents;
+ // allocate 4K, to be served from bitmap
+ EXPECT_EQ(block_size, ha.allocate(block_size, block_size,
+ 0, (int64_t)0, &extents));
+ ASSERT_EQ(1, extents.size());
+ ASSERT_EQ(0, extents[0].offset);
+
+ ASSERT_EQ(_1m * 14, ha.get_avl_free());
+ ASSERT_EQ(_1m * 8 - block_size, ha.get_bmap_free());
+
+ interval_set<uint64_t> release_set;
+ // release 4K, to be returned to bitmap
+ release_set.insert(extents[0].offset, extents[0].length);
+ ha.release(release_set);
+
+ ASSERT_EQ(_1m * 14, ha.get_avl_free());
+ ASSERT_EQ(_1m * 8, ha.get_bmap_free());
+ extents.clear();
+ release_set.clear();
+
+    // again we have at avl: 2M~2M, 8M~4M, 400M~4M, 408M~4M
+    // and at bmap: 0~1M, 16M~1M, 18M~2M, 24M~4M
+
+ // add 12M~3M which will go to avl
+ ha.init_add_free(3 * _4m, 3 * _1m);
+ ASSERT_EQ(_1m * 17, ha.get_avl_free());
+ ASSERT_EQ(_1m * 8, ha.get_bmap_free());
+
+ // add 15M~4K which will be appended to existing slot
+ ha.init_add_free(15 * _1m, 0x1000);
+ ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free());
+ ASSERT_EQ(_1m * 8, ha.get_bmap_free());
+
+    // again we have at avl: 2M~2M, 8M~(7M+4K), 400M~4M, 408M~4M
+    // and at bmap: 0~1M, 16M~1M, 18M~2M, 24M~4M
+
+    // some removals from bmap
+ ha.init_rm_free(28 * _1m - 0x1000, 0x1000);
+ ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free());
+ ASSERT_EQ(_1m * 8 - 0x1000, ha.get_bmap_free());
+
+ ha.init_rm_free(24 * _1m + 0x1000, 0x1000);
+ ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free());
+ ASSERT_EQ(_1m * 8 - 0x2000, ha.get_bmap_free());
+
+ ha.init_rm_free(24 * _1m + 0x1000, _4m - 0x2000);
+ ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free());
+ ASSERT_EQ(_1m * 4, ha.get_bmap_free());
+
+    // 4K removal from avl
+ ha.init_rm_free(15 * _1m, 0x1000);
+ ASSERT_EQ(_1m * 17, ha.get_avl_free());
+ ASSERT_EQ(_1m * 4, ha.get_bmap_free());
+
+    // remove the highest 4M extents from avl
+ ha.init_rm_free(_1m * 400, _4m);
+ ha.init_rm_free(_1m * 408, _4m);
+ ASSERT_EQ(_1m * 9, ha.get_avl_free());
+ ASSERT_EQ(_1m * 4, ha.get_bmap_free());
+
+ // we have at avl: 2M~2M, 8M~7M
+ // and at bmap: 0~1M, 16M~1M, 18M~2M
+
+    // this will go to avl, making the 16M~4M span broken across the two allocators
+ ha.init_add_free(17 * _1m, _1m);
+ ASSERT_EQ(_1m * 10, ha.get_avl_free());
+ ASSERT_EQ(_1m * 4, ha.get_bmap_free());
+
+ // we have at avl: 2M~2M, 8M~7M, 17M~1M
+ // and at bmap: 0~1M, 16M~1M, 18M~2M
+
+ // and now do some cutoffs from this "broken" 16M~4M span
+
+    // cut off 4K from bmap
+ ha.init_rm_free(16 * _1m, 0x1000);
+ ASSERT_EQ(_1m * 10, ha.get_avl_free());
+ ASSERT_EQ(_1m * 4 - 0x1000, ha.get_bmap_free());
+
+    // cut off 1M-4K from bmap and 4K from avl
+ ha.init_rm_free(16 * _1m + 0x1000, _1m);
+ ASSERT_EQ(_1m * 10 - 0x1000, ha.get_avl_free());
+ ASSERT_EQ(_1m * 3, ha.get_bmap_free());
+
+    // cut off 512K from avl
+ ha.init_rm_free(17 * _1m + 0x1000, _1m / 2);
+ ASSERT_EQ(_1m * 10 - 0x1000 - _1m / 2, ha.get_avl_free());
+ ASSERT_EQ(_1m * 3, ha.get_bmap_free());
+
+    // cut off the rest from avl and 8K from bmap
+ ha.init_rm_free(17 * _1m + 0x1000 + _1m / 2, _1m / 2 + 0x1000);
+ ASSERT_EQ(_1m * 9, ha.get_avl_free());
+ ASSERT_EQ(_1m * 3 - 0x2000, ha.get_bmap_free());
+ }
+
+ {
+ uint64_t block_size = 0x1000;
+ uint64_t capacity = 0x10000 * _1m; // = 64GB
+ TestHybridAllocator ha(g_ceph_context, capacity, block_size,
+ 4 * sizeof(range_seg_t), "test_hybrid_allocator");
+
+ ha.init_add_free(_1m, _1m);
+ ha.init_add_free(_1m * 3, _1m);
+ ha.init_add_free(_1m * 5, _1m);
+ ha.init_add_free(0x4000, 0x1000);
+
+ ASSERT_EQ(_1m * 3 + 0x1000, ha.get_free());
+ ASSERT_EQ(_1m * 3 + 0x1000, ha.get_avl_free());
+ ASSERT_EQ(0, ha.get_bmap_free());
+
+    // This will substitute the chunk 0x4000~1000.
+    // Since the new chunk is inserted into AvlAllocator::range_tree
+    // immediately before the 0x4000~1000 chunk, operations have to be
+    // ordered carefully so that no already-disposed iterator is used.
+ ha.init_add_free(0, 0x2000);
+
+ ASSERT_EQ(_1m * 3 + 0x3000, ha.get_free());
+ ASSERT_EQ(_1m * 3 + 0x2000, ha.get_avl_free());
+ ASSERT_EQ(0x1000, ha.get_bmap_free());
+ }
+}
+
+TEST(HybridAllocator, fragmentation)
+{
+ {
+ uint64_t block_size = 0x1000;
+ uint64_t capacity = 0x1000 * 0x1000; // = 16M
+ TestHybridAllocator ha(g_ceph_context, capacity, block_size,
+ 4 * sizeof(range_seg_t), "test_hybrid_allocator");
+
+ ha.init_add_free(0, 0x2000);
+ ha.init_add_free(0x4000, 0x2000);
+ ha.init_add_free(0x8000, 0x2000);
+ ha.init_add_free(0xc000, 0x1000);
+
+ ASSERT_EQ(0.5, ha.get_fragmentation());
+
+    // this will go to bmap with fragmentation = 1
+ ha.init_add_free(0x10000, 0x1000);
+
+ // which results in the following total fragmentation
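+    // (avl: 7 free 4K blocks at f = 0.5; bmap: 1 free 4K block at f = 1.0;
+    //  weights are the free-space shares 7/8 and 1/8)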
+ ASSERT_EQ(0.5 * 7 / 8 + 1.0 / 8, ha.get_fragmentation());
+ }
+}