TestAllocatorLevel02 al2;
uint64_t num_l2_entries = 3;
uint64_t capacity = num_l2_entries * 256 * 512 * 4096; // 3x512 MB
- al2.init(capacity, 0x1000);
+ uint64_t num_chunks = capacity / 4096;
+ al2.init(capacity, 4096);
std::cout << "Init L2 cont aligned" << std::endl;
+
+ std::map<size_t, size_t> bins_overall;
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall.size(), 1u);
+// std::cout<<bins_overall.begin()->first << std::endl;
+ ASSERT_EQ(bins_overall[cbits(num_chunks) - 1], 1u);
+
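
For readers tracking the new bins_overall assertions: collect_stats buckets each contiguous free run by size, and every check below indexes the bucket as cbits(N) - 1 for a run of N 4K chunks. A minimal sketch of that arithmetic, assuming cbits(x) is the one-based index of the highest set bit (64 - clz(x)), which is what the asserted indices imply:

    #include <cassert>
    #include <cstdint>

    // Assumed definition: a free run of N chunks lands in bin
    // cbits(N) - 1, i.e. floor(log2(N)).
    static inline uint64_t cbits(uint64_t x) { return 64 - __builtin_clzll(x); }

    int main() {
      uint64_t num_l2_entries = 3;
      uint64_t capacity = num_l2_entries * 256 * 512 * 4096; // 3x512 MB
      uint64_t num_chunks = capacity / 4096;                 // 393216
      // Fresh allocator: one free run covering everything.
      // 2^18 = 262144 <= 393216 < 524288 = 2^19 -> bin 18.
      assert(cbits(num_chunks) - 1 == 18);
      // Once the loop below fills the lower half, the remaining run is
      // num_chunks / 2 = 196608 chunks: 2^17 <= 196608 < 2^18 -> bin 17.
      assert(cbits(num_chunks / 2) - 1 == 17);
      return 0;
    }
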
for (uint64_t i = 0; i < capacity / 2; i += _1m) {
uint64_t allocated4 = 0;
interval_vector_t a4;
}
ASSERT_EQ(capacity / 2, al2.debug_get_free());
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall.size(), 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
+
{
+ size_t to_release = 2 * _1m + 0x1000;
// release 2M + 4K at the beginning
interval_vector_t r;
- r.emplace_back(0, 2 * _1m + 0x1000);
+ r.emplace_back(0, to_release);
al2.free_l2(r);
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall.size(), 2u);
+ ASSERT_EQ(bins_overall[cbits(to_release / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
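
The two buckets asserted in this hunk are plain chunk counts; spelled out under the same assumed cbits (with _1m = 1 MiB, as in the test):

    #include <cassert>
    #include <cstdint>

    static inline uint64_t cbits(uint64_t x) { return 64 - __builtin_clzll(x); }

    int main() {
      const uint64_t _1m = 1024 * 1024;
      uint64_t to_release = 2 * _1m + 0x1000;  // 2M + 4K at offset 0
      // Freed run of 513 chunks: 2^9 = 512 <= 513 < 1024 = 2^10 -> bin 9.
      assert(cbits(to_release / 0x1000) - 1 == 9);
      // The untouched upper half is one 196608-chunk run -> bin 17.
      assert(cbits(196608) - 1 == 17);
      return 0;
    }
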
{
// allocate 4K within the deallocated range
ASSERT_EQ(allocated4, 0x1000u);
ASSERT_EQ(a4[0].offset, 0u);
ASSERT_EQ(a4[0].length, 0x1000u);
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall.size(), 2u);
+ ASSERT_EQ(bins_overall[cbits(2 * _1m / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
// allocate 1M - should go to the second 1M chunk
ASSERT_EQ(allocated4, _1m);
ASSERT_EQ(a4[0].offset, _1m);
ASSERT_EQ(a4[0].length, _1m);
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall.size(), 3u);
+ ASSERT_EQ(bins_overall[0], 1u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x1000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
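
The third bucket appearing here is the instructive one: the 1M allocation lands at _1m (see the offset assertion above), splitting the 512-chunk run [0x1000, 2M + 4K) and stranding a single 4K chunk at [2M, 2M + 4K); that orphan is what bins_overall[0] counts. A sketch of the resulting runs, same assumed cbits:

    #include <cassert>
    #include <cstdint>

    static inline uint64_t cbits(uint64_t x) { return 64 - __builtin_clzll(x); }

    int main() {
      const uint64_t _1m = 1024 * 1024;
      // [0x1000, _1m): 255 chunks, 2^7 <= 255 < 2^8 -> bin 7.
      assert(cbits((_1m - 0x1000) / 0x1000) - 1 == 7);
      // [2M, 2M + 4K): one stranded chunk -> cbits(1) - 1 = 0 -> bin 0.
      assert(cbits(1) - 1 == 0);
      // The upper half is still one run in bin 17, giving 3 bins total.
      return 0;
    }
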
{
// and allocate yet another 8K within the deallocated range
ASSERT_EQ(allocated4, 0x2000u);
ASSERT_EQ(a4[0].offset, 0x1000u);
ASSERT_EQ(a4[0].length, 0x2000u);
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall[0], 1u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
// release just allocated 1M
interval_vector_t r;
r.emplace_back(_1m, _1m);
al2.free_l2(r);
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall.size(), 2u);
+ ASSERT_EQ(bins_overall[cbits((2 * _1m - 0x3000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
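
Freeing the 1M back merges three fragments ([0x3000, _1m), the released [_1m, 2M), and the stranded [2M, 2M + 4K)) into a single run, which is why the bin count drops from 3 to 2. One subtlety, shown below: the asserted expression counts 509 chunks while the merged run is really 510, but both sit in the same bucket, so the check holds either way (sketch, same assumed cbits):

    #include <cassert>
    #include <cstdint>

    static inline uint64_t cbits(uint64_t x) { return 64 - __builtin_clzll(x); }

    int main() {
      const uint64_t _1m = 1024 * 1024;
      // Merged run [0x3000, 2M + 4K): 510 chunks.
      uint64_t merged = (2 * _1m + 0x1000 - 0x3000) / 0x1000;
      assert(merged == 510);
      // The test's expression counts 509; 2^8 <= 509, 510 < 2^9, so both
      // land in bin 8 and the assertion holds either way.
      assert(cbits(merged) - 1 == 8);
      assert(cbits((2 * _1m - 0x3000) / 0x1000) - 1 == 8);
      return 0;
    }
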
{
// allocate 3M - should go to the second 1M chunk and @capacity/2
ASSERT_EQ(a4[0].length, _1m);
ASSERT_EQ(a4[1].offset, capacity / 2);
ASSERT_EQ(a4[1].length, 2 * _1m);
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall.size(), 3u);
+ ASSERT_EQ(bins_overall[0], 1u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u);
}
{
// release allocated 1M in the second meg chunk except the first 4K chunk
interval_vector_t r;
r.emplace_back(_1m + 0x1000, _1m);
al2.free_l2(r);
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall.size(), 3u);
+ ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u);
}
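
This hunk exercises a bucket boundary: the freed run [_1m + 4K, 2M + 4K) is exactly 1M, i.e. 256 = 2^8 chunks, the smallest count that reaches bin 8; one chunk fewer would stay in bin 7. Spelled out under the same assumed cbits:

    #include <cassert>
    #include <cstdint>

    static inline uint64_t cbits(uint64_t x) { return 64 - __builtin_clzll(x); }

    int main() {
      const uint64_t _1m = 1024 * 1024;
      // Exactly 256 chunks: cbits(256) = 9 -> bin 8 ...
      assert(cbits(_1m / 0x1000) - 1 == 8);
      // ... while 255 chunks would still map to bin 7, since each bin k
      // covers run lengths in [2^k, 2^(k+1)).
      assert(cbits(_1m / 0x1000 - 1) - 1 == 7);
      return 0;
    }
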
{
// release 2M @(capacity / 2)
interval_vector_t r;
r.emplace_back(capacity / 2, 2 * _1m);
al2.free_l2(r);
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall.size(), 3u);
+ ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((num_chunks) / 2) - 1], 1u);
}
{
- // allocate 3x512K - should go to the second halves of
+ // allocate 4x512K - should go to the second halves of
// the first and second 1M chunks and @(capacity / 2)
uint64_t allocated4 = 0;
interval_vector_t a4;
ASSERT_EQ(a4[1].length, _1m / 2);
ASSERT_EQ(a4[2].offset, capacity / 2);
ASSERT_EQ(a4[2].length, _1m);
+
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall.size(), 3u);
+ ASSERT_EQ(bins_overall[0], 1u);
+ // below we have 512K - 4K & 512K - 12K chunks which both fit into
+ // the same bin = 6
+ ASSERT_EQ(bins_overall[6], 2u);
+ ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u);
+
}
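
The bins_overall[6] == 2 check relies on two unequal leftovers sharing a bucket: after the 512K carve-outs, the remainders are 512K - 12K (125 chunks) and 512K - 4K (127 chunks), and both satisfy 2^6 <= N < 2^7. Verified under the same assumed cbits:

    #include <cassert>
    #include <cstdint>

    static inline uint64_t cbits(uint64_t x) { return 64 - __builtin_clzll(x); }

    int main() {
      const uint64_t _512k = 512 * 1024;
      uint64_t frag_a = (_512k - 0x3000) / 0x1000; // 125 chunks
      uint64_t frag_b = (_512k - 0x1000) / 0x1000; // 127 chunks
      // 2^6 = 64 <= 125, 127 < 128 = 2^7: both fragments share bin 6.
      assert(cbits(frag_a) - 1 == 6);
      assert(cbits(frag_b) - 1 == 6);
      return 0;
    }
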
{
// cleanup first 2M except the last 4K chunk
interval_vector_t r;
r.emplace_back(0, 2 * _1m - 0x1000);
al2.free_l2(r);
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+
+ ASSERT_EQ(bins_overall.size(), 3u);
+ ASSERT_EQ(bins_overall[0], 1u);
+ ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u);
}
{
// release 2M @(capacity / 2)
interval_vector_t r;
r.emplace_back(capacity / 2, 2 * _1m);
al2.free_l2(r);
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+
+ ASSERT_EQ(bins_overall.size(), 3u);
+ ASSERT_EQ(bins_overall[0], 1u);
+ ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u);
+ ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
// allocate 132M using 4M granularity - should go to (capacity / 2)
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(a4[0].offset, capacity / 2);
ASSERT_EQ(a4[0].length, 132 * _1m);
+
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+ ASSERT_EQ(bins_overall.size(), 3u);
}
{
// cleanup the leftover 4K chunk in the first 2M
interval_vector_t r;
r.emplace_back(2 * _1m - 0x1000, 0x1000);
al2.free_l2(r);
+ bins_overall.clear();
+ al2.collect_stats(bins_overall);
+
+ ASSERT_EQ(bins_overall.size(), 2u);
}
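
Freeing this last stranded 4K coalesces [0, 2M - 4K), [2M - 4K, 2M) and [2M, 2M + 4K) back into the 513-chunk run seen after the very first release, so, with the 132M still held at capacity / 2, only bins 9 and 17 remain, matching the size() == 2 check. A closing sanity sketch, same assumed cbits:

    #include <cassert>
    #include <cstdint>

    static inline uint64_t cbits(uint64_t x) { return 64 - __builtin_clzll(x); }

    int main() {
      const uint64_t _1m = 1024 * 1024;
      // [0, 2M + 4K) is whole again: 513 chunks -> bin 9.
      assert(cbits((2 * _1m + 0x1000) / 0x1000) - 1 == 9);
      // Upper half minus the 132M allocation: 196608 - 33792 = 162816
      // chunks, still 2^17 <= 162816 < 2^18 -> bin 17. Hence two bins.
      assert(cbits(196608 - 33792) - 1 == 17);
      return 0;
    }
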
{
// release 132M @(capacity / 2)
}
}
-
std::cout << "Done L2 cont aligned" << std::endl;
}
+