/*
* Bitmap based in-memory allocator.
* Author: Ramesh Chander, Ramesh.Chander@sandisk.com
+ *
+ * BitMap Tree Design:
+ * Storage is divided into a bitmap of blocks. Each bitmap is the size of an
+ * unsigned long. A group of bitmaps makes up a Zone. A Zone is the unit in
+ * which at most one thread can be active at a time; it also bounds the
+ * largest contiguous allocation that can be requested.
+ *
+ * The remaining nodes fall into three categories:
+ * the root node or Allocator, internal nodes or BitMapAreaIN, and
+ * finally the nodes that contain Zones, called BitMapAreaLeaf.
+ * The classification reflects how each type implements the interfaces
+ * defined in BitMapArea.
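+ *
+ * Illustrative sizes (assuming BITMAP_SPAN_SIZE = 1024 and a 64-bit
+ * unsigned long): a Zone covers 1024 blocks held in 16 bitmap words, a
+ * BitMapAreaLeaf covers up to 1024 Zones (about 1M blocks), and each
+ * additional BitMapAreaIN level multiplies the covered span by 1024.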
*/
#include "BitAllocator.h"
#include <assert.h>
+#include <math.h>
#define debug_assert assert
#define MIN(x, y) ((x) > (y) ? (y) : (x))
+#define MAX_INT16 ((uint16_t) -1 >> 1)
+#define MAX_INT32 ((uint32_t) -1 >> 1)
+
+int64_t BitMapAreaLeaf::count = 0;
+int64_t BitMapZone::count = 0;
+int64_t BitMapZone::total_blocks = BITMAP_SPAN_SIZE;
/*
* BmapEntityList functions.
*/
-BmapEntityListIter::BmapEntityListIter(BmapEntityList *list)
-{
- m_list = list;
- m_start_idx = list->get_marker();
- m_cur_idx = m_start_idx;
- m_wrap = false;
-}
-
-BmapEntityListIter::BmapEntityListIter(BmapEntityList *list, bool wrap)
+void BmapEntityListIter::init(BitMapAreaList *list, int64_t start_idx, bool wrap)
{
m_list = list;
- m_start_idx = list->get_marker();
+ m_start_idx = start_idx;
m_cur_idx = m_start_idx;
m_wrap = wrap;
+ m_wrapped = false;
+ m_end = false;
}
-BmapEntityListIter::BmapEntityListIter(BmapEntityList *list, int64_t start_idx)
+BmapEntityListIter::BmapEntityListIter(BitMapAreaList *list, int64_t start_idx)
{
- m_list = list;
- m_wrap = false;
- m_start_idx = start_idx;
- m_cur_idx = m_start_idx;
- m_wrapped = false;
+ init(list, start_idx, false);
}
-BmapEntityListIter::BmapEntityListIter(BmapEntityList *list, int64_t start_idx, bool wrap)
+BmapEntityListIter::BmapEntityListIter(BitMapAreaList *list, int64_t start_idx, bool wrap)
{
- m_list = list;
- m_wrap = wrap;
- m_start_idx = start_idx;
- m_cur_idx = m_start_idx;
- m_wrapped = false;
+ init(list, start_idx, wrap);
}
-BmapEntity * BmapEntityListIter::next()
+BitMapArea* BmapEntityListIter::next()
{
int64_t cur_idx = m_cur_idx;
if (m_wrapped &&
cur_idx == m_start_idx) {
/*
- * End of wrap cycle
+		 * End of wrap cycle: return the start item one last
+		 * time, then end the iteration.
*/
+ if (!m_end) {
+ m_end = true;
+ return m_list->get_nth_item(cur_idx);
+ }
return NULL;
}
m_cur_idx++;
- if (m_cur_idx == m_list->size()
- &&m_wrap) {
+ if (m_cur_idx == m_list->size() &&
+ m_wrap) {
m_cur_idx %= m_list->size();
m_wrapped = true;
}
-
if (cur_idx == m_list->size()) {
/*
* End of list
bmap_t BmapEntry::atomic_fetch()
{
- return std::atomic_load(&m_bits);
+ return m_bits;
}
bool BmapEntry::is_allocated(int64_t start_bit, int64_t num_bits)
void BmapEntry::clear_bit(int bit)
{
bmap_t bmask = bit_mask(bit);
- (void) std::atomic_fetch_and(&m_bits, ~(bmask));
+ m_bits &= ~(bmask);
}
void BmapEntry::clear_bits(int offset, int num_bits)
return;
}
bmap_t bmask = BmapEntry::align_mask(num_bits) >> offset;
- (void) std::atomic_fetch_and(&m_bits, ~(bmask));
+ m_bits &= ~(bmask);
}
void BmapEntry::set_bits(int offset, int num_bits)
}
bmap_t bmask = BmapEntry::align_mask(num_bits) >> offset;
- (void) std::atomic_fetch_or(&m_bits, bmask);
+ m_bits |= bmask;
}
/*
bool BmapEntry::check_n_set_bit(int bit)
{
bmap_t bmask = bit_mask(bit);
- return !(std::atomic_fetch_or(&m_bits, bmask) & bmask);
+ bool res = !(m_bits & bmask);
+ m_bits |= bmask;
+ return res;
}
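+
+/*
+ * Note: with the move away from std::atomic, BmapEntry bit updates rely on
+ * the caller holding the owning zone's exclusive lock; the zone allocation
+ * paths debug_assert(check_locked()) before touching bitmap words.
+ */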
/*
}
/*
- * Bitmap List related functions.
+ * Zone related functions.
*/
-int64_t BmapList::incr_marker(int64_t add)
+void BitMapZone::init(int64_t zone_num, int64_t total_blocks, bool def)
{
- return std::atomic_fetch_add(&m_marker, add);
+ m_area_index = zone_num;
+ debug_assert(size() > 0);
+ m_type = ZONE;
+
+ m_used_blocks = def? total_blocks: 0;
+
+ int64_t num_bmaps = total_blocks / BmapEntry::size();
+ debug_assert(num_bmaps < MAX_INT16);
+ debug_assert(total_blocks < MAX_INT32);
+ debug_assert(!(total_blocks % BmapEntry::size()));
+
+ std::vector<BmapEntry> *bmaps = new std::vector<BmapEntry> (num_bmaps, BmapEntry(def));
+ m_bmap_list = bmaps;
+ incr_count();
}
-void BmapList::set_marker(int64_t val)
+int64_t BitMapZone::sub_used_blocks(int64_t num_blocks)
{
- std::atomic_store(&m_marker, val);
+ return std::atomic_fetch_sub(&m_used_blocks, (int32_t) num_blocks);
}
-int64_t BmapList::get_marker()
+int64_t BitMapZone::add_used_blocks(int64_t num_blocks)
{
- return std::atomic_load(&m_marker);
+ return std::atomic_fetch_add(&m_used_blocks, (int32_t)num_blocks) + num_blocks;
}
-/*
- * Zone related functions.
- */
-void BitMapZone::init(int64_t zone_num, int64_t total_blocks, bool def)
+int64_t BitMapZone::get_used_blocks()
{
- m_zone_num = zone_num;
- m_total_blocks = total_blocks;
-
- m_used_blocks = def? total_blocks: 0;
-
- int64_t num_bmaps = total_blocks / BmapEntry::size();
- debug_assert(!(total_blocks % BmapEntry::size()));
+ return std::atomic_load(&m_used_blocks);
+}
- BmapEntity **bmaps = new BmapEntity *[num_bmaps];
- for (int i = 0; i < num_bmaps; i++) {
- bmaps[i] = new BmapEntry(def);
- }
+bool BitMapZone::reserve_blocks(int64_t num_blocks)
+{
+ debug_assert(0);
+ return false;
+}
- BmapEntityList *list = new BmapList(bmaps, num_bmaps, 0);
+void BitMapZone::unreserve(int64_t num_blocks, int64_t allocated)
+{
+ debug_assert(0);
+}
- m_bmap_list = list;
- m_state = ZONE_ACTIVE;
+int64_t BitMapZone::get_reserved_blocks()
+{
+ debug_assert(0);
+ return 0;
}
-BitMapZone::BitMapZone(int64_t zone_num, int64_t total_blocks)
+BitMapZone::BitMapZone(int64_t total_blocks, int64_t zone_num)
{
init(zone_num, total_blocks, false);
}
-BitMapZone::BitMapZone(int64_t zone_num, int64_t total_blocks, bool def)
+BitMapZone::BitMapZone(int64_t total_blocks, int64_t zone_num, bool def)
{
init(zone_num, total_blocks, def);
}
-BitMapZone::~BitMapZone()
+void BitMapZone::shutdown()
{
- lock_zone(true);
-
- m_state = ZONE_FREE;
- BmapEntityList *list = m_bmap_list;
-
- for (int64_t i = 0; i < list->size(); i++) {
- BmapEntry *bmap = (BmapEntry *) list->get_nth_item(i);
- delete bmap;
- }
-
- delete [] list->get_item_list();
- delete list;
+}
- unlock_zone();
+BitMapZone::~BitMapZone()
+{
+ delete m_bmap_list;
}
/*
*/
bool BitMapZone::is_exhausted()
{
- if (m_bmap_list->get_marker() >=
- m_total_blocks) {
- m_bmap_list->set_marker(0);
+ debug_assert(check_locked());
+ if (get_used_blocks() == size()) {
return true;
+ } else {
+ return false;
}
- return false;
-}
-
-void BitMapZone::reset_marker()
-{
- m_bmap_list->set_marker(0);
}
bool BitMapZone::is_allocated(int64_t start_block, int64_t num_blocks)
while (num_blocks) {
bit = start_block % BmapEntry::size();
- bmap = (BmapEntry *) m_bmap_list->get_nth_item(start_block /
- BmapEntry::size());
+ bmap = &(*m_bmap_list)[start_block / BmapEntry::size()];
falling_in_bmap = MIN(num_blocks, BmapEntry::size() - bit);
if (!bmap->is_allocated(bit, falling_in_bmap)) {
* marker provided in iter.
*/
int64_t BitMapZone::alloc_cont_bits(int64_t num_blocks,
- BmapEntityListIter *iter,
+ BitMapEntityIter<BmapEntry> *iter,
int64_t *scanned)
{
BmapEntry *bmap = NULL;
int64_t required = num_blocks;
+ debug_assert(check_locked());
while ((bmap = (BmapEntry *) iter->next())) {
int64_t found = 0;
int64_t max_expected = MIN(required, BmapEntry::size());
while (blks) {
bit = start_block % BmapEntry::size();
- bmap = (BmapEntry *) m_bmap_list->get_nth_item(start_block /
- BmapEntry::size());
+ bmap = &(*m_bmap_list)[start_block / BmapEntry::size()];
falling_in_bmap = MIN(blks, BmapEntry::size() - bit);
bmap->set_bits(bit, falling_in_bmap);
int bit = 0;
int64_t falling_in_bmap = 0;
-
while (num_blocks) {
bit = start_block % BmapEntry::size();
- bmap = (BmapEntry *) m_bmap_list->get_nth_item(start_block /
- BmapEntry::size());
+ bmap = &(*m_bmap_list)[start_block / BmapEntry::size()];
falling_in_bmap = MIN(num_blocks, BmapEntry::size() - bit);
bmap->clear_bits(bit, falling_in_bmap);
start_block += falling_in_bmap;
num_blocks -= falling_in_bmap;
}
-
}
-int64_t BitMapZone::get_index()
+void BitMapZone::lock_excl()
{
- return m_zone_num;
+ m_lock.lock();
}
-bool BitMapZone::lock_zone(bool wait)
+bool BitMapZone::lock_excl_try()
{
- if (wait) {
- m_lock.lock();
- return true;
- }
-
if (m_lock.try_lock()) {
return true;
- } else {
- return false;
}
+ return false;
}
-void BitMapZone::unlock_zone()
+void BitMapZone::unlock()
{
m_lock.unlock();
}
-int64_t BitMapZone::get_used_blocks()
-{
- return std::atomic_load(&m_used_blocks);
-}
-
-int64_t BitMapZone::size() {
- return m_total_blocks;
-}
-
-int64_t BitMapZone::add_used_blocks(int64_t blks)
-{
- return std::atomic_fetch_add(&m_used_blocks, blks) + blks;
-}
-
-int64_t BitMapZone::sub_used_blocks(int64_t blks)
+bool BitMapZone::check_locked()
{
- int64_t used_blks = std::atomic_fetch_sub(&m_used_blocks, blks) - blks;
- debug_assert(used_blks >= 0);
- return used_blks;
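+	/*
+	 * For debug_assert() use only: if the try-lock succeeds the zone was
+	 * not locked, the assertion fires and the process aborts, so the
+	 * lock acquired on that path is never released.
+	 */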
+ return !lock_excl_try();
}
/*
{
int64_t bmap_idx = 0;
int bit_idx = 0;
- int64_t marker = m_bmap_list->get_marker();
- int64_t marker_add = 0;
BmapEntry *bmap = NULL;
int64_t allocated = 0;
- int64_t zone_block_offset = get_index() * size();
- bmap_idx = marker / BmapEntry::size();
- bit_idx = marker % BmapEntry::size();
+ debug_assert(check_locked());
- BmapEntityListIter iter = BmapEntityListIter(
+ bit_idx = 0;
+ bmap_idx = 0;
+ BitMapEntityIter <BmapEntry> iter = BitMapEntityIter<BmapEntry>(
m_bmap_list, bmap_idx);
while ((bmap = (BmapEntry *) iter.next())) {
allocated = bmap->find_first_set_bits(num_blocks,
bit_idx, &start_offset, &scanned);
- marker_add += scanned;
bit_idx = 0;
if (allocated > 0) {
(*start_block) = start_offset +
- (iter.index() - 1) * bmap->size() +
- m_zone_num * size();
+ (iter.index() - 1) * bmap->size();
allocated += alloc_cont_bits(num_blocks - allocated,
&iter, &scanned);
- marker_add += scanned;
/*
		 * The iterator must step back one position when the allocation
		 * falls short, so scanning resumes from the last bitmap.
}
if (allocated < num_blocks) {
- free_blocks_int((*start_block - zone_block_offset), allocated);
+ free_blocks_int(*start_block, allocated);
allocated = 0;
+ *start_block = 0;
} else {
/*
* Got required.
}
}
- m_bmap_list->incr_marker(marker_add);
add_used_blocks(allocated);
-
return allocated;
}
/*
 * Allocate N blocks; discontiguous blocks are fine.
*/
-int64_t BitMapZone::alloc_blocks_dis(int64_t num_blocks, int64_t *alloc_blocks)
+int64_t BitMapZone::alloc_blocks_dis(int64_t num_blocks, int64_t zone_blk_off, int64_t *alloc_blocks)
{
int64_t bmap_idx = 0;
int bit = 0;
- int64_t marker_add = 0;
BmapEntry *bmap = NULL;
int64_t allocated = 0;
int64_t blk_off = 0;
- bmap_idx = m_bmap_list->get_marker() / BmapEntry::size();
- bit = m_bmap_list->get_marker() % BmapEntry::size();
+ debug_assert(check_locked());
- BmapEntityListIter iter = BmapEntityListIter(
- m_bmap_list, bmap_idx);
+ bmap_idx = 0;
+ bit = 0;
+
+ BitMapEntityIter <BmapEntry> iter = BitMapEntityIter<BmapEntry>(
+ m_bmap_list, bmap_idx);
while ((bmap = (BmapEntry *) iter.next())) {
int64_t scanned = 0;
- blk_off = (iter.index() - 1) * BmapEntry::size() +
- m_zone_num * size();
+ blk_off = (iter.index() - 1) * BmapEntry::size() + zone_blk_off;
allocated += bmap->find_any_free_bits(bit, num_blocks - allocated,
&alloc_blocks[allocated], blk_off, &scanned);
- marker_add += scanned;
}
- m_bmap_list->incr_marker(marker_add);
add_used_blocks(allocated);
return allocated;
}
/*
- * Zone List related functions
+ * BitMapArea Leaf and non-Leaf functions.
*/
-
-ZoneList::ZoneList(BmapEntity **list, int64_t len) :
- BmapEntityList(list, len)
+int64_t BitMapArea::get_span_size()
{
- m_marker = 0;
- return;
+ return BITMAP_SPAN_SIZE;
}
-ZoneList::ZoneList(BmapEntity **list, int64_t len, int64_t marker) :
- BmapEntityList(list, len)
+bmap_area_type_t BitMapArea::level_to_type(int level)
{
- m_marker = marker;
- return;
+ if (level == 0) {
+ return ZONE;
+ } else if (level == 1) {
+ return LEAF;
+ } else {
+ return NON_LEAF;
+ }
}
-int64_t ZoneList::incr_marker(int64_t add) {
- std::lock_guard<std::mutex> l (m_marker_mutex);
- m_marker += add;
- m_marker %= size();
-
- return m_marker;
+int BitMapArea::get_level(int64_t total_blocks)
+{
+ int level = 1;
+ int64_t span_size = BitMapArea::get_span_size();
+ int64_t spans = span_size * span_size;
+ while (spans < total_blocks) {
+ spans *= span_size;
+ level++;
+ }
+ return level;
}
-int64_t ZoneList::get_marker()
+int64_t BitMapArea::get_index()
{
- std::lock_guard<std::mutex> l (m_marker_mutex);
- return m_marker;
+ return m_area_index;
}
-void ZoneList::set_marker(int64_t val)
+bmap_area_type_t BitMapArea::get_type()
{
- std::lock_guard<std::mutex> l (m_marker_mutex);
- m_marker = val;
+ return m_type;
}
/*
- * Main allocator functions.
+ * BitMapArea Leaf and Internal node functions.
*/
-BitAllocator::BitAllocator(int64_t total_blocks, int64_t zone_size_block, bmap_alloc_mode_t mode)
+BitMapAreaIN::BitMapAreaIN()
{
- init(total_blocks, zone_size_block, mode, false);
+ // nothing
}
-BitAllocator::BitAllocator(int64_t total_blocks, int64_t zone_size_block,
- bmap_alloc_mode_t mode, bool def)
+void BitMapAreaIN::init_common(int64_t total_blocks, int64_t area_idx, bool def)
{
- init(total_blocks, zone_size_block, mode, def);
+ m_area_index = area_idx;
+ m_total_blocks = total_blocks;
+ m_level = BitMapArea::get_level(total_blocks);
+ m_type = BitMapArea::level_to_type(m_level);
+ m_reserved_blocks = 0;
+
+ m_used_blocks = def? total_blocks: 0;
}
-void BitAllocator::init(int64_t total_blocks, int64_t zone_size_block,
- bmap_alloc_mode_t mode, bool def)
+void BitMapAreaIN::init(int64_t total_blocks, int64_t area_idx, bool def)
{
- int64_t total_zones = 0;
-
- if (mode != SERIAL && mode != CONCURRENT) {
- debug_assert(0);
- }
+ int64_t num_child = 0;
+ debug_assert(!(total_blocks % BmapEntry::size()));
- if (total_blocks <= 0) {
- debug_assert(0);
- }
+ init_common(total_blocks, area_idx, def);
+ int64_t level_factor = pow(BitMapArea::get_span_size(), m_level);
- if (zone_size_block == 0 ||
- zone_size_block < BmapEntry::size()) {
- debug_assert(0);
+ num_child = total_blocks / level_factor;
+ debug_assert(num_child < MAX_INT16);
+ if (total_blocks % level_factor) {
+ num_child++;
}
- zone_size_block = (zone_size_block / BmapEntry::size()) *
- BmapEntry::size();
+ m_child_size_blocks = level_factor;
- if (total_blocks < zone_size_block) {
- debug_assert(0);
+ BitMapArea **children = new BitMapArea*[num_child];
+ int i = 0;
+ for (i = 0; i < num_child - 1; i++) {
+ if (m_level <= 2) {
+ children[i] = new BitMapAreaLeaf(m_child_size_blocks, i, def);
+ } else {
+ children[i] = new BitMapAreaIN(m_child_size_blocks, i, def);
+ }
+ total_blocks -= m_child_size_blocks;
}
- total_blocks = (total_blocks / zone_size_block) * zone_size_block;
- total_zones = total_blocks / zone_size_block;
-
- debug_assert(total_blocks > 0);
- debug_assert(total_zones > 0);
-
- pthread_rwlock_init(&m_alloc_slow_lock, NULL);
-
- m_total_blocks = total_blocks;
- m_total_zones = total_zones;
- m_zone_size_blocks = zone_size_block;
- m_alloc_mode = mode;
- m_allocated_blocks = def? total_blocks: 0;
- m_reserved_blocks = 0;
-
- BmapEntity **zonesp = new BmapEntity* [total_zones];
- for (int i = 0; i < total_zones; i++) {
- zonesp[i] = new BitMapZone(i, zone_size_block, def);
+ int last_level = BitMapArea::get_level(total_blocks);
+ if (last_level == 1) {
+ children[i] = new BitMapAreaLeaf(total_blocks, i, def);
+ } else {
+ children[i] = new BitMapAreaIN(total_blocks, i, def);
}
-
- BmapEntityList *list = new ZoneList(zonesp, total_zones, 0);
-
- m_zone_list = list;
-
- m_state = ALLOC_ACTIVE;
+ BitMapAreaList *list = new BitMapAreaList(children, num_child);
+ m_child_list = list;
+ m_num_child = num_child;
}
-BitAllocator::~BitAllocator()
+BitMapAreaIN::BitMapAreaIN(int64_t total_blocks, int64_t area_idx)
{
- alloc_lock(true);
-
- m_state = ALLOC_DESTROY;
- BmapEntityList *list = m_zone_list;
-
- for (int64_t i = 0; i < list->size(); i++) {
- BitMapZone *zone = (BitMapZone *) list->get_nth_item(i);
- delete zone;
- }
-
- delete [] list->get_item_list();
- delete list;
-
- alloc_unlock();
- pthread_rwlock_destroy(&m_alloc_slow_lock);
+ init(total_blocks, area_idx, false);
}
-void
-BitAllocator::shutdown()
+BitMapAreaIN::BitMapAreaIN(int64_t total_blocks, int64_t area_idx, bool def)
{
- alloc_lock(true);
- serial_lock();
+ init(total_blocks, area_idx, def);
}
-int64_t BitAllocator::sub_used_blocks(int64_t num_blocks)
+BitMapAreaIN::~BitMapAreaIN()
{
- int64_t used_blks =
- std::atomic_fetch_sub(&m_allocated_blocks, num_blocks);
- debug_assert(used_blks > 0);
- return used_blks;
}
-int64_t BitAllocator::add_used_blocks(int64_t num_blocks)
+void BitMapAreaIN::shutdown()
{
- return std::atomic_fetch_add(&m_allocated_blocks, num_blocks) + num_blocks;
+ lock_excl();
+ m_total_blocks = -1;
+ m_area_index = -2;
+ unlock();
}
-int64_t BitAllocator::get_used_blocks()
+bool BitMapAreaIN::child_check_n_lock(BitMapArea *child, int64_t required)
{
- return std::atomic_load(&m_allocated_blocks);
-}
+ child->lock_shared();
-int64_t BitAllocator::get_reserved_blocks()
-{
- return m_reserved_blocks;
+ if (child->is_exhausted()) {
+ child->unlock();
+ return false;
+ }
+
+ return true;
}
-int64_t BitAllocator::size()
+void BitMapAreaIN::child_unlock(BitMapArea *child)
{
- return m_total_blocks;
+ child->unlock();
}
-bool BitAllocator::reserve_blocks(int64_t num)
+bool BitMapAreaIN::is_exhausted()
{
- bool res = false;
- std::lock_guard<std::mutex> l(m_res_blocks_lock);
- if (add_used_blocks(num) <= size()) {
- res = true;
- m_reserved_blocks += num;
- } else {
- sub_used_blocks(num);
- res = false;
+ if (get_used_blocks() == size()) {
+ return true;
}
-
- debug_assert(m_allocated_blocks <= size());
- return res;
+ return false;
}
-void BitAllocator::unreserve(int64_t needed, int64_t allocated)
+int64_t BitMapAreaIN::add_used_blocks(int64_t blks)
{
- std::lock_guard<std::mutex> l(m_res_blocks_lock);
- sub_used_blocks(needed - allocated);
- m_reserved_blocks -= needed;
- debug_assert(m_allocated_blocks >= 0);
+ std::lock_guard<std::mutex> l(m_blocks_lock);
+ m_used_blocks += blks;
+ return m_used_blocks;
}
-void BitAllocator::unreserve_blocks(int64_t unused)
+int64_t BitMapAreaIN::sub_used_blocks(int64_t num_blocks)
{
- unreserve(unused, 0);
-}
+ std::lock_guard<std::mutex> l(m_blocks_lock);
-void BitAllocator::serial_lock()
-{
- if (m_alloc_mode == SERIAL) {
- m_serial_mutex.lock();
- }
+ int64_t used_blks = m_used_blocks;
+ m_used_blocks -= num_blocks;
+ debug_assert(m_used_blocks >= 0);
+ return used_blks;
}
-void BitAllocator::serial_unlock()
+int64_t BitMapAreaIN::get_used_blocks()
{
- if (m_alloc_mode == SERIAL) {
- m_serial_mutex.unlock();
- }
+ std::lock_guard<std::mutex> l(m_blocks_lock);
+ return m_used_blocks;
}
-void BitAllocator::alloc_lock(bool write) {
- if (write) {
- pthread_rwlock_wrlock(&m_alloc_slow_lock);
- } else {
- pthread_rwlock_rdlock(&m_alloc_slow_lock);
+bool BitMapAreaIN::reserve_blocks(int64_t num)
+{
+ bool res = false;
+ std::lock_guard<std::mutex> u_l(m_blocks_lock);
+ if (m_used_blocks + num <= size()) {
+ m_used_blocks += num;
+ m_reserved_blocks += num;
+ res = true;
}
+ debug_assert(m_used_blocks <= size());
+ return res;
}
-void BitAllocator::alloc_unlock()
+void BitMapAreaIN::unreserve(int64_t needed, int64_t allocated)
{
- pthread_rwlock_unlock(&m_alloc_slow_lock);
+ std::lock_guard<std::mutex> l(m_blocks_lock);
+ m_used_blocks -= (needed - allocated);
+ m_reserved_blocks -= needed;
+ debug_assert(m_used_blocks >= 0);
+ debug_assert(m_reserved_blocks >= 0);
}
-
-bool BitAllocator::zone_free_to_alloc(BmapEntity *item,
- int64_t required, bool wait)
+int64_t BitMapAreaIN::get_reserved_blocks()
{
- BitMapZone *zone = (BitMapZone *) item;
- if (!zone->lock_zone(wait)) {
- return false;
- }
-
- if (zone->is_exhausted()) {
- if (zone->get_index() ==
- m_zone_list->get_marker()) {
- m_zone_list->incr_marker(1);
- }
- zone->unlock_zone();
- return false;
- }
-
- if (zone->get_used_blocks() + required >
- zone->size()) {
- zone->unlock_zone();
- return false;
- }
-
- return true;
+ return m_reserved_blocks;
}
-bool BitAllocator::is_allocated(int64_t start_block, int64_t num_blocks)
+bool BitMapAreaIN::is_allocated(int64_t start_block, int64_t num_blocks)
{
- BitMapZone *zone = NULL;
- int64_t zone_block_offset = 0;
- int64_t falling_in_zone = 0;
+ BitMapArea *area = NULL;
+ int64_t area_block_offset = 0;
+ int64_t falling_in_area = 0;
debug_assert(start_block >= 0 &&
(start_block + num_blocks <= size()));
assert(start_block >= 0);
while (num_blocks) {
- zone = (BitMapZone *) m_zone_list->get_nth_item(
- start_block / m_zone_size_blocks);
+ area = (BitMapArea *) m_child_list->get_nth_item(
+ start_block / m_child_size_blocks);
- zone_block_offset = start_block % m_zone_size_blocks;
- falling_in_zone = MIN(m_zone_size_blocks - zone_block_offset,
+ area_block_offset = start_block % area->size();
+ falling_in_area = MIN(m_child_size_blocks - area_block_offset,
num_blocks);
- if (!zone->is_allocated(zone_block_offset, falling_in_zone)) {
+ if (!area->is_allocated(area_block_offset, falling_in_area)) {
return false;
}
- start_block += falling_in_zone;
- num_blocks -= falling_in_zone;
+ start_block += falling_in_area;
+ num_blocks -= falling_in_area;
}
return true;
}
-bool BitAllocator::is_allocated(int64_t *alloc_blocks, int64_t num_blocks)
+bool BitMapAreaIN::is_allocated(int64_t *alloc_blocks, int64_t num_blocks, int64_t blk_off)
{
for (int64_t i = 0; i < num_blocks; i++) {
- if (!is_allocated(alloc_blocks[i], 1))
+ if (!is_allocated(alloc_blocks[i] - blk_off, 1)) {
return false;
+ }
}
return true;
}
-/*
- * Allocate N contiguous blocks.
- */
-int64_t BitAllocator::alloc_blocks_int(int64_t num_blocks,
- int64_t *start_block, bool wait)
+int64_t BitMapAreaIN::alloc_blocks_int(bool wait, bool wrap,
+ int64_t num_blocks, int64_t *start_block)
{
-
- int64_t zone_mark = m_zone_list->get_marker();
- BitMapZone *zone = NULL;
+ BitMapArea *child = NULL;
int64_t allocated = 0;
+ *start_block = 0;
BmapEntityListIter iter = BmapEntityListIter(
- m_zone_list, zone_mark, true);
+ m_child_list, 0, wrap);
- while ((zone = (BitMapZone *) iter.next())) {
-
- if (!zone_free_to_alloc(zone,
- num_blocks - allocated, wait)) {
+ while ((child = (BitMapArea *) iter.next())) {
+ if (!child_check_n_lock(child, num_blocks - allocated)) {
continue;
}
- allocated = zone->alloc_blocks(num_blocks, start_block);
-
- zone->unlock_zone();
+ allocated = child->alloc_blocks(wait, num_blocks, start_block);
+ child_unlock(child);
if (allocated == num_blocks) {
+ (*start_block) += child->get_index() * m_child_size_blocks;
break;
}
- zone->free_blocks(*start_block, allocated);
+ child->free_blocks(*start_block, allocated);
+ *start_block = 0;
allocated = 0;
}
-
return allocated;
}
-bool BitAllocator::check_input_dis(int64_t num_blocks)
+int64_t BitMapAreaIN::alloc_blocks(bool wait, int64_t num_blocks,
+ int64_t *start_block)
{
- if (num_blocks == 0) {
- return false;
- }
+ int64_t allocated = 0;
- if (num_blocks > size()) {
- return false;
+ lock_shared();
+
+ if (!reserve_blocks(num_blocks)) {
+ goto exit;
}
- return true;
+
+ allocated = alloc_blocks_int(wait, false, num_blocks, start_block);
+
+ unreserve(num_blocks, allocated);
+ debug_assert((get_used_blocks() <= m_total_blocks));
+ debug_assert(is_allocated(*start_block, allocated));
+
+exit:
+ unlock();
+ return allocated;
}
-bool BitAllocator::check_input(int64_t num_blocks)
+int64_t BitMapAreaIN::alloc_blocks_dis_int(bool wait, int64_t num_blocks,
+ int64_t area_blk_off, int64_t *block_list)
{
- if (num_blocks == 0) {
- return false;
- }
+ BitMapArea *child = NULL;
+ int64_t allocated = 0;
+ int64_t blk_off = 0;
- if (num_blocks > m_zone_size_blocks) {
- return false;
+ BmapEntityListIter iter = BmapEntityListIter(
+ m_child_list, 0, true);
+
+ while ((child = (BitMapArea *) iter.next())) {
+ if (!child_check_n_lock(child, 1)) {
+ continue;
+ }
+
+ blk_off = child->get_index() * m_child_size_blocks + area_blk_off;
+ allocated += child->alloc_blocks_dis(wait, num_blocks,
+ blk_off, &block_list[allocated]);
+ child_unlock(child);
+ if (allocated == num_blocks) {
+ break;
+ }
}
- return true;
+
+ return allocated;
}
-/*
- * Interface to allocate blocks after reserve.
- */
-int64_t BitAllocator::alloc_blocks_res(int64_t num_blocks, int64_t *start_block)
+int64_t BitMapAreaIN::alloc_blocks_dis(bool wait, int64_t num_blocks,
+ int64_t blk_off, int64_t *block_list)
{
- int scans = 2;
int64_t allocated = 0;
- if (!check_input(num_blocks)) {
- return 0;
- }
+ lock_shared();
+ allocated += alloc_blocks_dis_int(wait, num_blocks, blk_off, &block_list[allocated]);
+ add_used_blocks(allocated);
+ debug_assert(is_allocated(block_list, allocated, blk_off));
- alloc_lock(false);
- serial_lock();
+ unlock();
+ return allocated;
+}
- while (scans && !allocated) {
- allocated = alloc_blocks_int(num_blocks, start_block, false);
- scans --;
+
+void BitMapAreaIN::set_blocks_used_int(int64_t start_block, int64_t num_blocks)
+{
+ BitMapArea *child = NULL;
+ int64_t child_block_offset = 0;
+ int64_t falling_in_child = 0;
+ int64_t blks = num_blocks;
+ int64_t start_blk = start_block;
+
+ assert(start_block >= 0);
+
+ while (blks) {
+ child = (BitMapArea *) m_child_list->get_nth_item(
+ start_blk / m_child_size_blocks);
+
+ child_block_offset = start_blk % child->size();
+ falling_in_child = MIN(m_child_size_blocks - child_block_offset,
+ blks);
+ child->set_blocks_used(child_block_offset, falling_in_child);
+ start_blk += falling_in_child;
+ blks -= falling_in_child;
}
- if (!allocated) {
- /*
- * Could not find anything in two scans.
- * Go in serial manner.
- */
- allocated = alloc_blocks_int(num_blocks, start_block, true);
+ add_used_blocks(num_blocks);
+ debug_assert(is_allocated(start_block, num_blocks));
+}
+
+void BitMapAreaIN::set_blocks_used(int64_t start_block, int64_t num_blocks)
+{
+ if (num_blocks == 0) {
+ return;
}
- debug_assert(is_allocated(*start_block, allocated));
- unreserve(num_blocks, allocated);
+ lock_shared();
+ set_blocks_used_int(start_block, num_blocks);
+ unlock();
+}
- serial_unlock();
- alloc_unlock();
+void BitMapAreaIN::free_blocks_int(int64_t start_block, int64_t num_blocks)
+{
+ BitMapArea *child = NULL;
+ int64_t child_block_offset = 0;
+ int64_t falling_in_child = 0;
+
+ debug_assert(start_block >= 0 &&
+ (start_block + num_blocks) <= size());
+
+ if (num_blocks == 0) {
+ return;
+ }
+
+ assert(start_block >= 0);
+
+ while (num_blocks) {
+ child = (BitMapArea *) m_child_list->get_nth_item(
+ start_block / m_child_size_blocks);
+
+ child_block_offset = start_block % m_child_size_blocks;
+
+ falling_in_child = MIN(m_child_size_blocks - child_block_offset,
+ num_blocks);
+ child->free_blocks(child_block_offset, falling_in_child);
+ start_block += falling_in_child;
+ num_blocks -= falling_in_child;
+ }
- return allocated;
}
+void BitMapAreaIN::free_blocks(int64_t start_block, int64_t num_blocks)
+{
+ if (num_blocks == 0) {
+ return;
+ }
+ lock_shared();
+ debug_assert(is_allocated(start_block, num_blocks));
-int64_t BitAllocator::alloc_blocks(int64_t num_blocks, int64_t *start_block)
+ free_blocks_int(start_block, num_blocks);
+ (void) sub_used_blocks(num_blocks);
+
+ unlock();
+}
+
+/*
+ * BitMapArea Leaf
+ */
+BitMapAreaLeaf::BitMapAreaLeaf(int64_t total_blocks, int64_t area_idx)
{
- int scans = 2;
- int64_t allocated = 0;
+ init(total_blocks, area_idx, false);
+}
- if (!check_input(num_blocks)) {
- return 0;
+BitMapAreaLeaf::BitMapAreaLeaf(int64_t total_blocks, int64_t area_idx, bool def)
+{
+ init(total_blocks, area_idx, def);
+}
+
+void BitMapAreaLeaf::init(int64_t total_blocks, int64_t area_idx,
+ bool def)
+{
+ int64_t num_child = 0;
+ debug_assert(!(total_blocks % BmapEntry::size()));
+
+ init_common(total_blocks, area_idx, def);
+ num_child = total_blocks / pow(BitMapArea::get_span_size(), m_level);
+ m_child_size_blocks = total_blocks / num_child;
+
+ debug_assert(m_level == 1);
+ BitMapArea **children = new BitMapArea*[num_child];
+ for (int i = 0; i < num_child; i++) {
+ children[i] = new BitMapZone(m_child_size_blocks, i, def);
}
- alloc_lock(false);
- serial_lock();
+ BitMapAreaList *list = new BitMapAreaList(children, num_child);
- if (!reserve_blocks(num_blocks)) {
- goto exit;
+ m_child_list = list;
+ m_num_child = num_child;
+
+ BitMapAreaLeaf::incr_count();
+}
+
+BitMapAreaLeaf::~BitMapAreaLeaf()
+{
+ lock_excl();
+
+ BitMapAreaList *list = m_child_list;
+ for (int64_t i = 0; i < list->size(); i++) {
+ BitMapArea *child = (BitMapArea *) list->get_nth_item(i);
+ delete child;
}
- while (scans && !allocated) {
- allocated = alloc_blocks_int(num_blocks, start_block, false);
- scans --;
+ delete [] list->get_item_list();
+ delete list;
+
+ unlock();
+}
+
+bool BitMapAreaLeaf::child_check_n_lock(BitMapArea *child, int64_t required, bool lock)
+{
+ if (lock) {
+ child->lock_excl();
+ } else if (!child->lock_excl_try()) {
+ return false;
}
- if (!allocated) {
- /*
- * Could not find anything in two scans.
- * Go in serial manner.
- */
- allocated = alloc_blocks_int(num_blocks, start_block, true);
+ if (child->is_exhausted()) {
+ child->unlock();
+ return false;
}
+ return true;
+}
- unreserve(num_blocks, allocated);
- debug_assert((m_allocated_blocks <= m_total_blocks));
- debug_assert(is_allocated(*start_block, allocated));
+void BitMapAreaLeaf::child_unlock(BitMapArea *child)
+{
+ child->unlock();
+}
-exit:
- serial_unlock();
- alloc_unlock();
+int64_t BitMapAreaLeaf::alloc_blocks_int(bool wait, bool wrap,
+ int64_t num_blocks, int64_t *start_block)
+{
+ BitMapArea *child = NULL;
+ int64_t allocated = 0;
+
+ *start_block = 0;
+
+ BmapEntityListIter iter = BmapEntityListIter(
+ m_child_list, 0, false);
+
+ while ((child = iter.next())) {
+ if (!child_check_n_lock(child, num_blocks - allocated, false)) {
+ continue;
+ }
+ debug_assert(child->get_type() == ZONE);
+
+ allocated = child->alloc_blocks(num_blocks, start_block);
+ child_unlock(child);
+ if (allocated == num_blocks) {
+ (*start_block) += child->get_index() * m_child_size_blocks;
+ break;
+ }
+
+ child->free_blocks(*start_block, allocated);
+ *start_block = 0;
+ allocated = 0;
+ }
+ return allocated;
+}
+
+int64_t BitMapAreaLeaf::alloc_blocks_dis_int(bool wait, int64_t num_blocks,
+ int64_t area_blk_off, int64_t *block_list)
+{
+ BitMapArea *child = NULL;
+ int64_t allocated = 0;
+ int64_t blk_off = 0;
+
+ BmapEntityListIter iter = BmapEntityListIter(
+ m_child_list, 0, false);
+ while ((child = (BitMapArea *) iter.next())) {
+ if (!child_check_n_lock(child, 1, false)) {
+ continue;
+ }
+
+ blk_off = child->get_index() * m_child_size_blocks + area_blk_off;
+ allocated += child->alloc_blocks_dis(num_blocks, blk_off, &block_list[allocated]);
+ child_unlock(child);
+ if (allocated == num_blocks) {
+ break;
+ }
+ }
return allocated;
}
-void BitAllocator::free_blocks_int(int64_t start_block, int64_t num_blocks)
+void BitMapAreaLeaf::free_blocks_int(int64_t start_block, int64_t num_blocks)
{
- BitMapZone *zone = NULL;
- int64_t zone_block_offset = 0;
- int64_t falling_in_zone = 0;
+ BitMapArea *child = NULL;
+ int64_t child_block_offset = 0;
+ int64_t falling_in_child = 0;
debug_assert(start_block >= 0 &&
(start_block + num_blocks) <= size());
assert(start_block >= 0);
while (num_blocks) {
- zone = (BitMapZone *) m_zone_list->get_nth_item(
- start_block / m_zone_size_blocks);
+ child = (BitMapArea *) m_child_list->get_nth_item(
+ start_block / m_child_size_blocks);
- zone_block_offset = start_block % m_zone_size_blocks;
+ child_block_offset = start_block % m_child_size_blocks;
- falling_in_zone = MIN(m_zone_size_blocks - zone_block_offset,
+ falling_in_child = MIN(m_child_size_blocks - child_block_offset,
num_blocks);
- zone->free_blocks(zone_block_offset, falling_in_zone);
- start_block += falling_in_zone;
- num_blocks -= falling_in_zone;
+
+ child->lock_excl();
+ child->free_blocks(child_block_offset, falling_in_child);
+ child->unlock();
+ start_block += falling_in_child;
+ num_blocks -= falling_in_child;
}
+}
+/*
+ * BitMapArea List related functions
+ */
+BitMapAreaList::BitMapAreaList(BitMapArea **list, int64_t len)
+{
+ m_items = list;
+ m_num_items = len;
+ return;
}
-void BitAllocator::free_blocks(int64_t start_block, int64_t num_blocks)
+/*
+ * Main allocator functions.
+ */
+BitAllocator::BitAllocator(int64_t total_blocks, int64_t zone_size_block, bmap_alloc_mode_t mode)
{
- alloc_lock(false);
- debug_assert(is_allocated(start_block, num_blocks));
+ init_check(total_blocks, zone_size_block, mode, false, false);
+}
- free_blocks_int(start_block, num_blocks);
- (void) sub_used_blocks(num_blocks);
+BitAllocator::BitAllocator(int64_t total_blocks, int64_t zone_size_block,
+				bmap_alloc_mode_t mode, bool def)
+{
+ init_check(total_blocks, zone_size_block, mode, def, false);
+}
- alloc_unlock();
+BitAllocator::BitAllocator(int64_t total_blocks, int64_t zone_size_block,
+				bmap_alloc_mode_t mode, bool def, bool stats_on)
+{
+ init_check(total_blocks, zone_size_block, mode, def, stats_on);
}
-void BitAllocator::set_blocks_used(int64_t start_block, int64_t num_blocks)
+void BitAllocator::init_check(int64_t total_blocks, int64_t zone_size_block,
+ bmap_alloc_mode_t mode, bool def, bool stats_on)
{
- BitMapZone *zone = NULL;
- int64_t zone_block_offset = 0;
- int64_t falling_in_zone = 0;
- int64_t blks = num_blocks;
- int64_t start_blk = start_block;
+ int64_t total_zones = 0;
- if (num_blocks == 0) {
- return;
+ if (mode != SERIAL && mode != CONCURRENT) {
+ debug_assert(0);
}
- alloc_lock(false);
+ if (total_blocks <= 0) {
+ debug_assert(0);
+ }
+
+ if (zone_size_block == 0 ||
+ zone_size_block < BmapEntry::size()) {
+ debug_assert(0);
+ }
+
+ zone_size_block = (zone_size_block / BmapEntry::size()) *
+ BmapEntry::size();
+
+ if (total_blocks < zone_size_block) {
+ debug_assert(0);
+ }
+
+ total_blocks = (total_blocks / zone_size_block) * zone_size_block;
+ total_zones = total_blocks / zone_size_block;
+
+ debug_assert(total_blocks > 0);
+ debug_assert(total_zones > 0);
+ m_alloc_mode = mode;
+ m_is_stats_on = stats_on;
+ if (m_is_stats_on) {
+ m_stats = new BitAllocatorStats();
+ }
+
+ pthread_rwlock_init(&m_rw_lock, NULL);
+ init(total_blocks, 0, def);
+}
+
+void BitAllocator::lock_excl()
+{
+ pthread_rwlock_wrlock(&m_rw_lock);
+}
+
+void BitAllocator::lock_shared()
+{
+ pthread_rwlock_rdlock(&m_rw_lock);
+}
+
+void BitAllocator::unlock()
+{
+ pthread_rwlock_unlock(&m_rw_lock);
+}
+
+BitAllocator::~BitAllocator()
+{
+ lock_excl();
+
+ BitMapAreaList *list = m_child_list;
+ for (int64_t i = 0; i < list->size(); i++) {
+ BitMapArea *child = (BitMapArea *) list->get_nth_item(i);
+ delete child;
+ }
+
+ delete [] list->get_item_list();
+ delete list;
+
+ unlock();
+ pthread_rwlock_destroy(&m_rw_lock);
+}
+
+void
+BitAllocator::shutdown()
+{
+ lock_excl();
serial_lock();
+}
- assert(start_block >= 0);
+void BitAllocator::unreserve_blocks(int64_t unused)
+{
+ unreserve(unused, 0);
+}
- while (blks) {
- zone = (BitMapZone *) m_zone_list->get_nth_item(
- start_blk / m_zone_size_blocks);
+void BitAllocator::serial_lock()
+{
+ if (m_alloc_mode == SERIAL) {
+ m_serial_mutex.lock();
+ }
+}
- zone_block_offset = start_blk % m_zone_size_blocks;
- falling_in_zone = MIN(m_zone_size_blocks - zone_block_offset,
- blks);
- zone->set_blocks_used(zone_block_offset, falling_in_zone);
- start_blk += falling_in_zone;
- blks -= falling_in_zone;
+void BitAllocator::serial_unlock()
+{
+ if (m_alloc_mode == SERIAL) {
+ m_serial_mutex.unlock();
}
+}
- add_used_blocks(num_blocks);
- debug_assert(is_allocated(start_block, num_blocks));
+bool BitAllocator::child_check_n_lock(BitMapArea *child, int64_t required)
+{
+ child->lock_shared();
+
+ if (child->is_exhausted()) {
+ child->unlock();
+ return false;
+ }
+
+ return true;
+}
+
+void BitAllocator::child_unlock(BitMapArea *child)
+{
+ child->unlock();
+}
+
+bool BitAllocator::check_input_dis(int64_t num_blocks)
+{
+ if (num_blocks == 0) {
+ return false;
+ }
+
+ if (num_blocks > size()) {
+ return false;
+ }
+ return true;
+}
+
+bool BitAllocator::check_input(int64_t num_blocks)
+{
+ if (num_blocks == 0) {
+ return false;
+ }
+
+ if (num_blocks > BitMapArea::get_span_size()) {
+ return false;
+ }
+ return true;
+}
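+
+/*
+ * Allocation strategy (sketch): scan first under the shared lock with
+ * per-child try-locks so allocators can run concurrently; if that yields
+ * nothing, retry under the exclusive lock so the scan cannot race.
+ */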
+
+/*
+ * Interface to allocate blocks after reserve.
+ */
+int64_t BitAllocator::alloc_blocks_res(int64_t num_blocks, int64_t *start_block)
+{
+ int scans = 1;
+ int64_t allocated = 0;
+
+ *start_block = 0;
+ if (!check_input(num_blocks)) {
+ return 0;
+ }
+
+ lock_shared();
+ serial_lock();
+
+ while (scans && !allocated) {
+ allocated = alloc_blocks_int(false, true, num_blocks, start_block);
+ scans --;
+ }
+ if (is_stats_on()) {
+ m_stats->add_concurrent_scans(scans);
+ }
+
+ if (!allocated) {
+ /*
+		 * Could not find anything in the concurrent scan.
+		 * Retry under the exclusive (serial) lock.
+ */
+ serial_unlock();
+ unlock();
+ lock_excl();
+ serial_lock();
+ allocated = alloc_blocks_int(false, true, num_blocks, start_block);
+ if (is_stats_on()) {
+ m_stats->add_serial_scans(1);
+ }
+ }
+
+ debug_assert(is_allocated(*start_block, allocated));
+ unreserve(num_blocks, allocated);
serial_unlock();
- alloc_unlock();
+ unlock();
+
+ return allocated;
}
-int64_t BitAllocator::alloc_blocks_dis_int(int64_t num_blocks,
- int64_t *block_list, bool lock)
+int64_t BitAllocator::alloc_blocks(int64_t num_blocks, int64_t *start_block)
{
- int64_t zone_mark = m_zone_list->get_marker();
- BitMapZone *zone = NULL;
+ int scans = 1;
int64_t allocated = 0;
- alloc_lock(false);
+ *start_block = 0;
+ if (!check_input(num_blocks)) {
+ debug_assert(0);
+ return 0;
+ }
+
+ lock_shared();
serial_lock();
- BmapEntityListIter iter = BmapEntityListIter(
- m_zone_list, zone_mark, true);
+ if (!reserve_blocks(num_blocks)) {
+ goto exit;
+ }
+ if (is_stats_on()) {
+ m_stats->add_alloc_calls(1);
+ m_stats->add_allocated(num_blocks);
+ }
- while ((zone = (BitMapZone *) iter.next())) {
+ while (scans && !allocated) {
+ allocated = alloc_blocks_int(false, true, num_blocks, start_block);
+ scans--;
+ }
+ if (is_stats_on()) {
+ m_stats->add_concurrent_scans(scans);
+ }
- if (!zone_free_to_alloc(zone, 1, lock)) {
- continue;
+ if (!allocated) {
+ /*
+		 * Could not find anything in the concurrent scan.
+		 * Retry under the exclusive (serial) lock.
+ */
+ serial_unlock();
+ unlock();
+ lock_excl();
+ serial_lock();
+ allocated = alloc_blocks_int(false, true, num_blocks, start_block);
+ if (!allocated) {
+ allocated = alloc_blocks_int(false, true, num_blocks, start_block);
+ debug_assert(allocated);
}
-
- allocated += zone->alloc_blocks_dis(num_blocks, &block_list[allocated]);
- zone->unlock_zone();
- if (allocated == num_blocks) {
- break;
+ if (is_stats_on()) {
+ m_stats->add_serial_scans(1);
}
}
- alloc_unlock();
+ unreserve(num_blocks, allocated);
+ debug_assert((get_used_blocks() <= m_total_blocks));
+ debug_assert(is_allocated(*start_block, allocated));
+
+exit:
serial_unlock();
+ unlock();
return allocated;
}
+void BitAllocator::free_blocks(int64_t start_block, int64_t num_blocks)
+{
+ if (num_blocks == 0) {
+ return;
+ }
+
+ if (is_stats_on()) {
+ m_stats->add_free_calls(1);
+ m_stats->add_freed(num_blocks);
+ }
+
+ lock_shared();
+ debug_assert(is_allocated(start_block, num_blocks));
+
+ free_blocks_int(start_block, num_blocks);
+ (void) sub_used_blocks(num_blocks);
+
+ unlock();
+}
+
+
+void BitAllocator::set_blocks_used(int64_t start_block, int64_t num_blocks)
+{
+ if (num_blocks == 0) {
+ return;
+ }
+
+ lock_shared();
+ serial_lock();
+ set_blocks_used_int(start_block, num_blocks);
+
+ serial_unlock();
+ unlock();
+}
+
/*
 * Allocate N discontiguous blocks.
*/
int64_t BitAllocator::alloc_blocks_dis(int64_t num_blocks, int64_t *block_list)
{
- int scans = 2;
+ int scans = 1;
int64_t allocated = 0;
+ /*
+	 * This is the root, so the block offset is 0.
+ */
+ int64_t blk_off = 0;
if (!check_input_dis(num_blocks)) {
return 0;
}
+ if (is_stats_on()) {
+ m_stats->add_alloc_calls(1);
+ m_stats->add_allocated(num_blocks);
+ }
+
+ lock_shared();
+ serial_lock();
if (!reserve_blocks(num_blocks)) {
- return 0;
+ goto exit;
}
+
while (scans && allocated < num_blocks) {
- allocated += alloc_blocks_dis_int(num_blocks, &block_list[allocated], false);
+ allocated += alloc_blocks_dis_int(false, num_blocks, blk_off, &block_list[allocated]);
scans --;
}
+ if (is_stats_on()) {
+ m_stats->add_concurrent_scans(scans);
+ }
if (allocated < num_blocks) {
/*
		 * Retry under the exclusive (serial) lock so we reliably
		 * pick up whatever is still available.
*/
- allocated += alloc_blocks_dis_int(num_blocks, &block_list[allocated], true);
+ serial_unlock();
+ unlock();
+ lock_excl();
+ serial_lock();
+ allocated += alloc_blocks_dis_int(false, num_blocks, blk_off, &block_list[allocated]);
+ if (is_stats_on()) {
+ m_stats->add_serial_scans(1);
+ }
}
unreserve(num_blocks, allocated);
- debug_assert(is_allocated(block_list, allocated));
+ debug_assert(is_allocated(block_list, allocated, 0));
+
+exit:
+ serial_unlock();
+ unlock();
return allocated;
}
void BitAllocator::free_blocks_dis(int64_t num_blocks, int64_t *block_list)
{
- alloc_lock(false);
+ lock_shared();
+ if (is_stats_on()) {
+ m_stats->add_free_calls(1);
+ m_stats->add_freed(num_blocks);
+ }
for (int64_t i = 0; i < num_blocks; i++) {
free_blocks_int(block_list[i], 1);
debug_assert(get_used_blocks() > 0);
sub_used_blocks(num_blocks);
- alloc_unlock();
+ unlock();
}
#ifndef CEPH_OS_BLUESTORE_BITALLOCATOR_H
#define CEPH_OS_BLUESTORE_BITALLOCATOR_H
+#define debug_assert assert
+#define BITMAP_SPAN_SIZE (1024)
+
+#include <assert.h>
#include <stdint.h>
#include <pthread.h>
#include <mutex>
#include <atomic>
+#include <vector>
-class BmapEntity {
-public:
- static int64_t size();
- virtual int64_t get_index() { return -1; }
- virtual bool is_allocated(int64_t start, int64_t num) = 0;
-
- virtual ~BmapEntity() { }
-};
-
-
-class BmapEntityList {
- BmapEntity **m_items;
- int64_t m_num_items;
-
+class BitAllocatorStats {
public:
-
- BmapEntityList(BmapEntity **list, int64_t len) {
- m_items = list;
- m_num_items = len;
+ bool m_on;
+ std::atomic<int64_t> m_total_alloc_calls;
+ std::atomic<int64_t> m_total_free_calls;
+ std::atomic<int64_t> m_total_allocated;
+ std::atomic<int64_t> m_total_freed;
+ std::atomic<int64_t> m_total_serial_scans;
+ std::atomic<int64_t> m_total_concurrent_scans;
+ std::atomic<int64_t> m_total_node_scanned;
+
+ BitAllocatorStats() {
+ m_total_alloc_calls = 0;
+ m_total_free_calls = 0;
+ m_total_allocated = 0;
+ m_total_freed = 0;
+ m_total_serial_scans = 0;
+ m_total_concurrent_scans = 0;
+ m_total_node_scanned = 0;
}
- virtual ~BmapEntityList() { }
-
- BmapEntity *get_nth_item(int64_t idx) {
- return m_items[idx];
+ void add_alloc_calls(int64_t val) {
+ std::atomic_fetch_add(&m_total_alloc_calls, val);
}
-
- BmapEntity** get_item_list() {
- return m_items;
+ void add_free_calls(int64_t val) {
+ std::atomic_fetch_add(&m_total_free_calls, val);
}
-
- virtual int64_t incr_marker(int64_t add) = 0;
- virtual int64_t get_marker() = 0;
- virtual void set_marker(int64_t val) = 0;
-
- int64_t size() {
- return m_num_items;
+ void add_allocated(int64_t val) {
+ std::atomic_fetch_add(&m_total_allocated, val);
+ }
+ void add_freed(int64_t val) {
+ std::atomic_fetch_add(&m_total_freed, val);
+ }
+ void add_serial_scans(int64_t val) {
+ std::atomic_fetch_add(&m_total_serial_scans, val);
+ }
+ void add_concurrent_scans(int64_t val) {
+ std::atomic_fetch_add(&m_total_concurrent_scans, val);
+ }
+ void add_node_scanned(int64_t val) {
+ std::atomic_fetch_add(&m_total_node_scanned, val);
}
};
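+
+/*
+ * The counters above are plain atomics; they are updated only when the
+ * allocator is constructed with stats_on set (see is_stats_on()).
+ */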
-
-class BmapEntityListIter {
- BmapEntityList *m_list;
+template <class BitMapEntity>
+class BitMapEntityIter {
+ std::vector<BitMapEntity> *m_list;
int64_t m_start_idx;
int64_t m_cur_idx;
bool m_wrap;
bool m_wrapped;
+ bool m_end;
public:
- BmapEntityListIter(BmapEntityList *list);
-
- BmapEntityListIter(BmapEntityList *list, bool wrap);
+ void init(std::vector<BitMapEntity> *list, bool wrap, int64_t start_idx) {
+ m_list = list;
+ m_wrap = wrap;
+ m_start_idx = start_idx;
+ m_cur_idx = m_start_idx;
+ m_wrapped = false;
+ m_end = false;
+ }
- BmapEntityListIter(BmapEntityList *list, int64_t start_idx);
+ BitMapEntityIter(std::vector<BitMapEntity> *list, int64_t start_idx) {
+ init(list, false, start_idx);
+ }
+ BitMapEntityIter(std::vector<BitMapEntity> *list, int64_t start_idx, bool wrap) {
+ init(list, wrap, start_idx);
+ }
- BmapEntityListIter(BmapEntityList *list, int64_t start_idx, bool wrap);
+ BitMapEntity *next() {
+ int64_t cur_idx = m_cur_idx;
+
+ if (m_wrapped &&
+ cur_idx == m_start_idx) {
+ /*
+			 * End of wrap cycle: return the start item one
+			 * last time, then end the iteration.
+ */
+ if (!m_end) {
+ m_end = true;
+ return &(*m_list)[cur_idx];
+ }
+ return NULL;
+ }
+ m_cur_idx++;
+
+ if (m_cur_idx == m_list->size() &&
+ m_wrap) {
+ m_cur_idx %= m_list->size();
+ m_wrapped = true;
+ }
+
+ if (cur_idx == m_list->size()) {
+ /*
+ * End of list
+ */
+ return NULL;
+ }
+
+ debug_assert(cur_idx < m_list->size());
+ return &(*m_list)[cur_idx];
+ }
- BmapEntity *next();
- int64_t index();
- void decr_idx();
+ int64_t index() {
+ return m_cur_idx;
+ }
+ void decr_idx() {
+ m_cur_idx--;
+ debug_assert(m_cur_idx > 0);
+ }
};
typedef unsigned long bmap_t;
-class BmapEntry: public BmapEntity {
+class BmapEntry {
private:
- std::atomic<bmap_t> m_bits;
+ bmap_t m_bits;
+
public:
static bmap_t full_bmask();
static int64_t size();
bmap_t bit_mask(int bit_num);
bmap_t atomic_fetch();
BmapEntry(bool val);
+ BmapEntry() {
+ m_bits = 0;
+ }
+ BmapEntry(const BmapEntry& bmap) {
+ bmap_t i = bmap.m_bits;
+ m_bits = i;
+ }
void clear_bit(int bit);
void clear_bits(int offset, int num_bits);
int64_t *alloc_list, int64_t block_offset,
int64_t *scanned);
- virtual ~BmapEntry();
+ ~BmapEntry();
};
-class BmapList: public BmapEntityList {
+typedef enum bmap_area_type {
+ ZONE = 1,
+ LEAF = 2,
+ NON_LEAF = 3
+} bmap_area_type_t;
+
+class BitMapArea {
+
+protected:
+ int16_t m_area_index;
+ bmap_area_type_t m_type;
- std::atomic<int64_t> m_marker;
public:
- BmapList(BmapEntity **bmaps, int len, int64_t marker):
- BmapEntityList(bmaps, len) {
- m_marker = marker;
+ static int64_t get_span_size();
+ bmap_area_type_t level_to_type(int level);
+ static int get_level(int64_t total_blocks);
+ virtual bool is_allocated(int64_t start_block, int64_t num_blocks) = 0;
+	virtual bool is_allocated(int64_t *blocks, int64_t num_blocks, int64_t blk_off) {
+ debug_assert(0);
+ return true;
+ }
+ virtual bool is_exhausted() = 0;
+ virtual bool child_check_n_lock(BitMapArea *child, int64_t required) {
+ debug_assert(0);
+ return true;
+ }
+ virtual bool child_check_n_lock(BitMapArea *child, int64_t required, bool lock) {
+ debug_assert(0);
+ return true;
+ }
+ virtual void child_unlock(BitMapArea *child) {
+ debug_assert(0);
}
- int64_t incr_marker(int64_t add);
- void set_marker(int64_t val);
- int64_t get_marker();
-};
+ virtual void lock_excl() = 0;
+ virtual bool lock_excl_try() {
+ debug_assert(0);
+ return false;
+ }
+ virtual void lock_shared() {
+ debug_assert(0);
+ return;
+ }
+ virtual void unlock() = 0;
+
+ virtual int64_t sub_used_blocks(int64_t num_blocks) = 0;
+ virtual int64_t add_used_blocks(int64_t num_blocks) = 0;
+ virtual bool reserve_blocks(int64_t num_blocks) = 0;
+ virtual void unreserve(int64_t num_blocks, int64_t allocated) = 0;
+ virtual int64_t get_reserved_blocks() = 0;
+ virtual int64_t get_used_blocks() = 0;
+
+ virtual void shutdown() = 0;
+ virtual int64_t alloc_blocks(bool wait, int64_t num_blocks, int64_t *start_block) {
+ debug_assert(0);
+ return 0;
+ }
+ virtual int64_t alloc_blocks(int64_t num_blocks, int64_t *start_block) {
+ debug_assert(0);
+ return 0;
+ }
+ virtual int64_t alloc_blocks_dis(bool wait, int64_t num_blocks,
+ int64_t blk_off, int64_t *block_list) {
+ debug_assert(0);
+ return 0;
+ }
+ virtual int64_t alloc_blocks_dis(int64_t num_blocks,
+ int64_t blk_offset, int64_t *block_list) {
+ debug_assert(0);
+ return 0;
+ }
+ virtual void set_blocks_used(int64_t start_block, int64_t num_blocks) = 0;
+ virtual void free_blocks(int64_t start_block, int64_t num_blocks) = 0;
+ virtual int64_t size() = 0;
-class BitMapZone: public BmapEntity {
+ int64_t child_count();
+ int64_t get_index();
+ int64_t get_level();
+ bmap_area_type_t get_type();
+ virtual ~BitMapArea() { }
+};
+
+class BitMapAreaList {
private:
- int64_t m_total_blocks;
- int64_t m_zone_num;
- std::atomic<int64_t> m_used_blocks;
+ BitMapArea **m_items;
+ int64_t m_num_items;
- BmapEntityList *m_bmap_list;
+public:
+ BitMapArea *get_nth_item(int64_t idx) {
+ return m_items[idx];
+ }
- enum {ZONE_FREE = 0, ZONE_ACTIVE = 1} m_state;
- std::mutex m_lock;
+ BitMapArea ** get_item_list() {
+ return m_items;
+ }
- int64_t alloc_cont_bits(int64_t num_blocks, BmapEntityListIter *iter, int64_t *bmap_out_idx);
- void free_blocks_int(int64_t start_block, int64_t num_blocks);
- void init(int64_t zone_num, int64_t total_blocks, bool def);
+ int64_t size() {
+ return m_num_items;
+ }
+ BitMapAreaList(BitMapArea **list, int64_t len);
+ BitMapArea **get_list() {
+ return m_items;
+ }
+};
+
+class BmapEntityListIter {
+ BitMapAreaList *m_list;
+ int64_t m_start_idx;
+ int64_t m_cur_idx;
+ bool m_wrap;
+ bool m_wrapped;
+ bool m_end;
public:
- BitMapZone(int64_t zone_num, int64_t total_blocks);
- BitMapZone(int64_t zone_num, int64_t total_blocks, bool def);
- bool lock_zone(bool wait);
- void unlock_zone();
- int64_t get_used_blocks();
+ void init(BitMapAreaList *list, int64_t start_idx, bool wrap);
+ BmapEntityListIter(BitMapAreaList *list);
- int64_t add_used_blocks(int64_t blks);
- int64_t sub_used_blocks(int64_t blks);
+ BmapEntityListIter(BitMapAreaList *list, bool wrap);
- int64_t alloc_blocks(int64_t num_blocks, int64_t *start_block);
- void set_blocks_used(int64_t start_block, int64_t num_blocks);
- void free_blocks(int64_t start_block, int64_t num_blocks);
+ BmapEntityListIter(BitMapAreaList *list, int64_t start_idx);
+
+ BmapEntityListIter(BitMapAreaList *list, int64_t start_idx, bool wrap);
- int64_t alloc_blocks_dis(int64_t num_blocks, int64_t *allocated_blocks);
+ BitMapArea *next();
+ int64_t index();
+ void decr_idx();
+};
+class BitMapZone: public BitMapArea {
+
+private:
+ std::atomic<int32_t> m_used_blocks;
+ std::vector <BmapEntry> *m_bmap_list;
+ std::mutex m_lock;
+
+public:
+ static int64_t count;
+ static int64_t total_blocks;
+ static void incr_count() { count++;}
+ static int64_t get_total_blocks() {return total_blocks;}
+ bool is_allocated(int64_t start_block, int64_t num_blocks);
bool is_exhausted();
- bool is_allocated(int64_t stat_block, int64_t num_blocks);
- int64_t get_index();
- int64_t size();
-	void reset_marker();
- virtual ~BitMapZone();
+ int64_t sub_used_blocks(int64_t num_blocks);
+ int64_t add_used_blocks(int64_t num_blocks);
+ bool reserve_blocks(int64_t num_blocks);
+ void unreserve(int64_t num_blocks, int64_t allocated);
+ int64_t get_reserved_blocks();
+ int64_t get_used_blocks();
+ int64_t size() {
+ return get_total_blocks();
+ }
+
+ void lock_excl();
+ bool lock_excl_try();
+ void unlock();
+ bool check_locked();
+
+ int64_t alloc_cont_bits(int64_t num_blocks,
+ BitMapEntityIter<BmapEntry> *iter, int64_t *bmap_out_idx);
+ void free_blocks_int(int64_t start_block, int64_t num_blocks);
+ void init(int64_t zone_num, int64_t total_blocks, bool def);
+
+ BitMapZone(int64_t total_blocks, int64_t zone_num);
+ BitMapZone(int64_t total_blocks, int64_t zone_num, bool def);
+
+ ~BitMapZone();
+ void shutdown();
+
+ int64_t alloc_blocks(int64_t num_blocks, int64_t *start_block);
+ int64_t alloc_blocks_dis(int64_t num_blocks, int64_t blk_off, int64_t *block_list);
+ void set_blocks_used(int64_t start_block, int64_t num_blocks);
+
+ void free_blocks(int64_t start_block, int64_t num_blocks);
};
-typedef enum bmap_alloc_mode {
- SERIAL = 1,
- CONCURRENT = 2,
-} bmap_alloc_mode_t;
+class BitMapAreaIN: public BitMapArea {
+protected:
+ int64_t m_child_size_blocks;
+ int64_t m_total_blocks;
+ int16_t m_level;
+ int16_t m_num_child;
-class ZoneList : public BmapEntityList {
+ int64_t m_used_blocks;
+ int64_t m_reserved_blocks;
+ std::mutex m_blocks_lock;
+ BitMapAreaList *m_child_list;
-private:
- int64_t m_marker;
- std::mutex m_marker_mutex;
+ bool is_allocated(int64_t start_block, int64_t num_blocks);
+ bool is_allocated(int64_t *blocks, int64_t num_blocks, int64_t blk_off);
+ virtual bool is_exhausted();
+ virtual bool child_check_n_lock(BitMapArea *child, int64_t required);
+ virtual void child_unlock(BitMapArea *child);
+
+ virtual void lock_excl() {
+ return;
+ }
+ virtual void lock_shared() {
+ return;
+ }
+ virtual void unlock() {
+ return;
+ }
+
+	void init(int64_t total_blocks, int64_t area_idx, bool def);
+	void init_common(int64_t total_blocks, int64_t area_idx, bool def);
public:
+ BitMapAreaIN();
+	BitMapAreaIN(int64_t total_blocks, int64_t area_idx);
+	BitMapAreaIN(int64_t total_blocks, int64_t area_idx, bool def);
- ZoneList(BmapEntity **list, int64_t len);
- ZoneList(BmapEntity **list, int64_t len, int64_t marker);
- int64_t incr_marker(int64_t add);
- int64_t get_marker();
- void set_marker(int64_t val);
+ virtual ~BitMapAreaIN();
+ void shutdown();
+ virtual int64_t sub_used_blocks(int64_t num_blocks);
+ virtual int64_t add_used_blocks(int64_t num_blocks);
+ virtual bool reserve_blocks(int64_t num_blocks);
+ virtual void unreserve(int64_t num_blocks, int64_t allocated);
+ virtual int64_t get_reserved_blocks();
+ virtual int64_t get_used_blocks();
+ virtual int64_t size() {
+ return m_total_blocks;
+ }
+ virtual int64_t alloc_blocks_int(bool wait, bool wrap,
+ int64_t num_blocks, int64_t *start_block);
+ virtual int64_t alloc_blocks(bool wait, int64_t num_blocks, int64_t *start_block);
+ virtual int64_t alloc_blocks_dis_int(bool wait, int64_t num_blocks,
+ int64_t blk_off, int64_t *block_list);
+ virtual int64_t alloc_blocks_dis(bool wait, int64_t num_blocks,
+ int64_t blk_off, int64_t *block_list);
+ virtual void set_blocks_used_int(int64_t start_block, int64_t num_blocks);
+ virtual void set_blocks_used(int64_t start_block, int64_t num_blocks);
+
+ virtual void free_blocks_int(int64_t start_block, int64_t num_blocks);
+ virtual void free_blocks(int64_t start_block, int64_t num_blocks);
};
-class BitAllocator {
+class BitMapAreaLeaf: public BitMapAreaIN {
+
private:
- bmap_alloc_mode_t m_alloc_mode;
- int64_t m_zone_size_blocks;
- int64_t m_total_zones;
+	void init(int64_t total_blocks, int64_t area_idx,
+		  bool def);
- int64_t m_total_blocks;
- std::atomic<int64_t> m_allocated_blocks;
- int64_t m_reserved_blocks;
- std::mutex m_res_blocks_lock;
- BmapEntityList *m_zone_list;
+public:
+ static int64_t count;
+ static void incr_count() { count++;}
+ BitMapAreaLeaf() { }
+	BitMapAreaLeaf(int64_t total_blocks, int64_t area_idx);
+	BitMapAreaLeaf(int64_t total_blocks, int64_t area_idx, bool def);
+ bool child_check_n_lock(BitMapArea *child, int64_t required, bool lock);
+ void child_unlock(BitMapArea *child);
+
+ int64_t alloc_blocks_int(bool wait, bool wrap,
+ int64_t num_blocks, int64_t *start_block);
+ int64_t alloc_blocks_dis_int(bool wait, int64_t num_blocks,
+ int64_t blk_off, int64_t *block_list);
+ void free_blocks_int(int64_t start_block, int64_t num_blocks);
+
+ virtual ~BitMapAreaLeaf();
+};
- enum {ALLOC_DESTROY = 0, ALLOC_ACTIVE = 1} m_state;
+typedef enum bmap_alloc_mode {
+ SERIAL = 1,
+ CONCURRENT = 2,
+} bmap_alloc_mode_t;
+
+class BitAllocator: public BitMapAreaIN {
+private:
+ bmap_alloc_mode_t m_alloc_mode;
std::mutex m_serial_mutex;
- pthread_rwlock_t m_alloc_slow_lock;
+ pthread_rwlock_t m_rw_lock;
+ BitAllocatorStats *m_stats;
+ bool m_is_stats_on;
- bool is_allocated(int64_t start_block, int64_t num_blocks);
- bool is_allocated(int64_t *blocks, int64_t num_blocks);
+ bool is_stats_on() {
+ return m_is_stats_on;
+ }
- int64_t alloc_blocks_int(int64_t num_blocks, int64_t *start_block, bool lock);
- int64_t alloc_blocks_dis_int(int64_t num_blocks, int64_t *block_list, bool lock);
- void free_blocks_int(int64_t start_block, int64_t num_blocks);
- bool zone_free_to_alloc(BmapEntity *zone, int64_t required, bool lock);
+ bool child_check_n_lock(BitMapArea *child, int64_t required);
+ virtual void child_unlock(BitMapArea *child);
void serial_lock();
void serial_unlock();
+ void lock_excl();
+ void lock_shared();
+ void unlock();
- void alloc_lock(bool write);
- void alloc_unlock();
bool check_input(int64_t num_blocks);
bool check_input_dis(int64_t num_blocks);
- void unreserve(int64_t needed, int64_t allocated);
- void init(int64_t total_blocks, int64_t zone_size_block, bmap_alloc_mode_t mode, bool def);
+ void init_check(int64_t total_blocks, int64_t zone_size_block,
+ bmap_alloc_mode_t mode, bool def, bool stats_on);
public:
BitAllocator(int64_t total_blocks, int64_t zone_size_block, bmap_alloc_mode_t mode);
BitAllocator(int64_t total_blocks, int64_t zone_size_block, bmap_alloc_mode_t mode, bool def);
+ BitAllocator(int64_t total_blocks, int64_t zone_size_block,
+ bmap_alloc_mode_t mode, bool def, bool stats_on);
~BitAllocator();
void shutdown();
int64_t alloc_blocks(int64_t num_blocks, int64_t *start_block);
int64_t alloc_blocks_res(int64_t num_blocks, int64_t *start_block);
void free_blocks(int64_t start_block, int64_t num_blocks);
void set_blocks_used(int64_t start_block, int64_t num_blocks);
- int64_t sub_used_blocks(int64_t num_blocks);
- int64_t add_used_blocks(int64_t num_blocks);
- int64_t get_used_blocks();
- int64_t get_reserved_blocks();
- int64_t size();
- bool reserve_blocks(int64_t blocks);
void unreserve_blocks(int64_t blocks);
int64_t alloc_blocks_dis(int64_t num_blocks, int64_t *block_list);
void free_blocks_dis(int64_t num_blocks, int64_t *block_list);
+
+ BitAllocatorStats *get_stats() {
+ return m_stats;
+ }
};
#endif // End of file
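+/*
+ * Minimal usage sketch of the public BitAllocator API exercised by the
+ * tests below (illustrative only; the block counts are arbitrary):
+ *
+ *   BitAllocator *alloc = new BitAllocator(1024, 1024, CONCURRENT);
+ *   int64_t start = 0;
+ *   int64_t got = alloc->alloc_blocks(16, &start);  // contiguous range
+ *   if (got == 16) {
+ *     alloc->free_blocks(start, got);               // return the range
+ *   }
+ *   delete alloc;
+ */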
#include "os/bluestore/BitAllocator.h"
#include <stdio.h>
#include <assert.h>
-#define debug_assert assert
-#define NUM_THREADS 16
-#define MAX_BLOCKS (1024 * 1024 * 4)
+#include <math.h>
+#include <gtest/gtest.h>
-void test_bmap_iter()
+#define bmap_test_assert(x) EXPECT_EQ(true, (x))
+#define NUM_THREADS 16
+#define MAX_BLOCKS (1024 * 1024 * 16)
+
+TEST(BitAllocator, test_bmap_iter)
{
int num_items = 5;
int off = 2;
- class BmapEntityTmp : public BmapEntity {
+
+ class BmapEntityTmp {
int64_t m_num;
int64_t m_len;
public:
+ void init(int index) {
+ m_num = index;
+ }
+ BmapEntityTmp() { }
BmapEntityTmp(int num) {
m_num = num;
m_len = num;
return true;
}
};
-
- class BmapEntityListTmp: public BmapEntityList {
- private:
- int64_t m_marker;
- public:
- BmapEntityListTmp(BmapEntity **bmaps, int len, int64_t marker):
- BmapEntityList(bmaps, len) {
- m_marker = marker;
- }
-
- int64_t incr_marker(int64_t add)
- {
- return m_marker++;
- }
- void set_marker(int64_t val)
- {
- m_marker = val;
- }
- int64_t get_marker()
- {
- return m_marker;
- }
-
- };
-
- int i = 0;
BmapEntityTmp *obj = NULL;
-
- BmapEntity **bmap = new BmapEntity*[num_items];
+ int i = 0;
+ std::vector<BmapEntityTmp> *arr = new std::vector<BmapEntityTmp>(num_items);
for (i = 0; i < num_items; i++) {
- bmap[i] = new BmapEntityTmp(i);
+ (*arr)[i].init(i);
}
-
- BmapEntityList *list = new BmapEntityListTmp(bmap, num_items, 0);
- BmapEntityListIter *iter = new BmapEntityListIter(list, off, false);
+ BitMapEntityIter<BmapEntityTmp> iter = BitMapEntityIter<BmapEntityTmp>(arr, off, false);
i = off;
int count = 0;
int64_t last_idx = off;
- while ((obj = (BmapEntityTmp*) iter->next())) {
- debug_assert(obj->get_index() == last_idx);
- debug_assert(obj->get_index() == i);
- debug_assert(obj == bmap[i]);
- last_idx = iter->index();
+ while ((obj = iter.next())) {
+ bmap_test_assert(obj->get_index() == last_idx);
+ bmap_test_assert(obj->get_index() == i);
+ bmap_test_assert(obj == &(*arr)[i]);
+ last_idx = iter.index();
i++;
count++;
}
- debug_assert(i == num_items);
- debug_assert(count == num_items - off);
-
- delete iter;
+ bmap_test_assert(i == num_items);
+ bmap_test_assert(count == num_items - off);
- iter = new BmapEntityListIter(list, off, true);
+ iter = BitMapEntityIter<BmapEntityTmp>(arr, off, true);
i = off;
last_idx = off;
count = 0;
- while ((obj = (BmapEntityTmp*) iter->next())) {
- debug_assert(obj->get_index() == last_idx);
- debug_assert(obj->get_index() == i);
- debug_assert(obj == bmap[i]);
- last_idx = iter->index();
-
+ while ((obj = iter.next())) {
+ bmap_test_assert(obj->get_index() == last_idx);
+ bmap_test_assert(obj->get_index() == i);
+ bmap_test_assert(obj == &(*arr)[i]);
+ last_idx = iter.index();
i = (i + 1) % num_items;
count++;
}
- debug_assert(i == off);
- debug_assert(count == num_items);
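+ // A wrapping iterator re-yields the start element once before it
+ // terminates, hence one extra item and an end index one past the start.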
+ bmap_test_assert(i == off + 1);
+ bmap_test_assert(count == num_items + 1);
+ delete arr;
num_items = 4;
off = num_items - 1;
- //list = new BmapEntityList(bmap, num_items);
- list = new BmapEntityListTmp(bmap, num_items, 0);
+ arr = new std::vector<BmapEntityTmp>(num_items);
+ for (i = 0; i < num_items; i++) {
+ (*arr)[i].init(i);
+ }
+ iter = BitMapEntityIter<BmapEntityTmp>(arr, off, true);
+ i = off;
+ last_idx = off;
+ count = 0;
+ while ((obj = iter.next())) {
+ bmap_test_assert(obj->get_index() == last_idx);
+ bmap_test_assert(obj->get_index() == i);
+ bmap_test_assert(obj == &(*arr)[i]);
+ last_idx = iter.index();
+ i = (i + 1) % num_items;
+ count++;
+ }
+ bmap_test_assert(i == (off + 1) % num_items);
+ bmap_test_assert(count == num_items + 1);
+
+ /*
+ * BitMapArea Iter tests.
+ */
+ BitMapArea *area = NULL;
+ BitMapArea **children = new BitMapArea*[num_items];
+ for (i = 0; i < num_items; i++) {
+ children[i] = new BitMapAreaLeaf(BitMapArea::get_span_size(), i, false);
+ }
+
+ off = 0;
+ BitMapAreaList *area_list = new BitMapAreaList(children, num_items);
+ BmapEntityListIter area_iter = BmapEntityListIter(
+ area_list, (int64_t) 0);
+ i = off;
+ last_idx = off;
+ count = 0;
+ while ((area = area_iter.next())) {
+ bmap_test_assert(area->get_index() == last_idx);
+ bmap_test_assert(area->get_index() == i);
+ bmap_test_assert(area == children[i]);
+ last_idx = area_iter.index();
+ i = (i + 1) % num_items;
+ count++;
+ }
+ bmap_test_assert(i == off);
+ bmap_test_assert(count == num_items);
- iter = new BmapEntityListIter(list, off, true);
+ // Same iteration with wrap enabled, starting at offset 0.
+ off = 0;
+ area_iter = BmapEntityListIter(area_list, off, true);
i = off;
last_idx = off;
count = 0;
- while ((obj = (BmapEntityTmp*) iter->next())) {
- debug_assert(obj->get_index() == last_idx);
- debug_assert(obj->get_index() == i);
- debug_assert(obj == bmap[i]);
- last_idx = iter->index();
+ while ((area = area_iter.next())) {
+ bmap_test_assert(area->get_index() == last_idx);
+ bmap_test_assert(area->get_index() == i);
+ bmap_test_assert(area == children[i]);
+ last_idx = area_iter.index();
i = (i + 1) % num_items;
count++;
}
- debug_assert(i == off);
- debug_assert(count == num_items);
+ bmap_test_assert(i == (off + 1) % num_items);
+ bmap_test_assert(count == num_items + 1);
}
-void test_bmap_entry()
+TEST(BitAllocator, test_bmap_entry)
{
int i = 0;
int start = 0;
// Clear bits one by one and check they are cleared
for (i = 0; i < size; i++) {
bmap->clear_bit(i);
- debug_assert(!bmap->check_bit(i));
+ bmap_test_assert(!bmap->check_bit(i));
}
// Set all bits again using set_bits
// clear 4 bits at a time and then check allocated
for (i = 0; i < size/4; i++) {
bmap->clear_bits(i * 4, 4);
- debug_assert(!bmap->is_allocated(i * 4, 4));
+ bmap_test_assert(!bmap->is_allocated(i * 4, 4));
}
// set all bits again
// clear alternate bits, check and set those bits
for (i = 0; i < size/2; i++) {
bmap->clear_bit(i * 2 + 1);
- debug_assert(!bmap->check_bit(i * 2 + 1));
- debug_assert(bmap->check_n_set_bit(i * 2 + 1));
+ bmap_test_assert(!bmap->check_bit(i * 2 + 1));
+ bmap_test_assert(bmap->check_n_set_bit(i * 2 + 1));
}
// free 1, 2 and size bits at a time and try to find n cont bits
for (i = 0; i < size / 4; i++) {
bmap->clear_bits(i * 2 + 1, i + 1);
- debug_assert(!bmap->check_bit(i * 2 + 1));
- debug_assert(bmap->find_n_cont_bits(i * 2 + 1, i + 1) ==
+ bmap_test_assert(!bmap->check_bit(i * 2 + 1));
+ bmap_test_assert(bmap->find_n_cont_bits(i * 2 + 1, i + 1) ==
i + 1);
}
// free 1, 2 and size bits at a time and try to find any cont bits
for (i = 0; i < size / 4; i++) {
bmap->clear_bits(i * 2 + 1, i + 1);
- debug_assert(!bmap->is_allocated(i * 2 + 1, i + 1));
+ bmap_test_assert(!bmap->is_allocated(i * 2 + 1, i + 1));
}
for (i = 0; i < size / 4; i++) {
bmap->clear_bits(i * 2 + 1, i + 1);
allocated = bmap->find_first_set_bits(i + 1, 0, &start, &scanned);
- debug_assert(allocated == i + 1);
- debug_assert(scanned == ((i * 2 + 1) + (i + 1)));
- debug_assert(start == i * 2 + 1);
+ bmap_test_assert(allocated == i + 1);
+ bmap_test_assert(scanned == ((i * 2 + 1) + (i + 1)));
+ bmap_test_assert(start == i * 2 + 1);
bmap->set_bits(0, BmapEntry::size());
}
bmap->clear_bits(BmapEntry::size() - 6, 6);
allocated = bmap->find_first_set_bits(6, 0, &start, &scanned);
- debug_assert(allocated == 6);
- debug_assert(scanned == BmapEntry::size() - 6 + 6);
- debug_assert(start == BmapEntry::size() - 6);
- debug_assert(bmap->is_allocated(start, 6));
+ bmap_test_assert(allocated == 6);
+ bmap_test_assert(scanned == BmapEntry::size() - 6 + 6);
+ bmap_test_assert(start == BmapEntry::size() - 6);
+ bmap_test_assert(bmap->is_allocated(start, 6));
delete bmap;
bmap = new BmapEntry(false);
bmap->set_bits(4, BmapEntry::size() - 4);
- debug_assert(bmap->is_allocated(4, BmapEntry::size() - 4));
- debug_assert(!bmap->is_allocated(0, 4));
+ bmap_test_assert(bmap->is_allocated(4, BmapEntry::size() - 4));
+ bmap_test_assert(!bmap->is_allocated(0, 4));
bmap->set_bits(0, 4);
- debug_assert(bmap->is_allocated(0, BmapEntry::size()));
-
-
+ bmap_test_assert(bmap->is_allocated(0, BmapEntry::size()));
}
-void test_zone_alloc()
+TEST(BitAllocator, test_zone_alloc)
{
int total_blocks = 1024;
int64_t blks = 1;
int64_t start_blk = 0;
int64_t allocated = 0;
- BitMapZone *zone = new BitMapZone(0, total_blocks);
+ BitMapZone *zone = new BitMapZone(total_blocks, 0);
// Allocate all blocks and see that it is allocating in order.
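+ // Zones expect callers to hold the exclusive zone lock, so take it here
+ // the way the tree code would before allocating.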
+ bool lock = zone->lock_excl_try();
+ bmap_test_assert(lock);
+
for (int i = 0; i < total_blocks; i++) {
allocated = zone->alloc_blocks(blks, &start_blk);
- debug_assert(last_blk + 1 == start_blk);
- debug_assert(allocated == blks);
+ bmap_test_assert(last_blk + 1 == start_blk);
+ bmap_test_assert(allocated == blks);
last_blk = start_blk;
}
- debug_assert(zone->get_used_blocks() == total_blocks);
+ bmap_test_assert(zone->get_used_blocks() == total_blocks);
for (int i = 0; i < total_blocks; i++) {
- debug_assert(zone->get_used_blocks() == total_blocks - i);
+ bmap_test_assert(zone->get_used_blocks() == total_blocks - i);
zone->free_blocks(i, blks);
}
blks = 2;
last_blk = -2;
- debug_assert(zone->is_exhausted());
for (int i = 0; i < total_blocks/2; i++) {
allocated = zone->alloc_blocks(blks, &start_blk);
- debug_assert(last_blk + 2 == start_blk);
+ bmap_test_assert(last_blk + 2 == start_blk);
last_blk = start_blk;
}
// Free different boundaries and allocate those
blks = 3;
- debug_assert(zone->is_exhausted());
+ bmap_test_assert(zone->is_exhausted());
zone->free_blocks(BmapEntry::size() - blks, blks);
zone->free_blocks(BmapEntry::size(), blks);
allocated = zone->alloc_blocks(blks * 2, &start_blk);
- debug_assert(BmapEntry::size() - blks == start_blk);
- debug_assert(allocated == blks * 2);
+ bmap_test_assert(BmapEntry::size() - blks == start_blk);
+ bmap_test_assert(allocated == blks * 2);
blks = 4;
zone->free_blocks(BmapEntry::size() * 2 - blks, 2 * blks);
allocated = zone->alloc_blocks(2 * blks, &start_blk);
- debug_assert(BmapEntry::size() * 2 - blks == start_blk);
- debug_assert(allocated == blks * 2);
+ bmap_test_assert(BmapEntry::size() * 2 - blks == start_blk);
+ bmap_test_assert(allocated == blks * 2);
- zone->reset_marker();
blks = BmapEntry::size() * 2;
zone->free_blocks(BmapEntry::size() * 6 - blks, blks);
allocated = zone->alloc_blocks(blks, &start_blk);
- debug_assert(BmapEntry::size() * 6 - blks == start_blk);
+ bmap_test_assert(BmapEntry::size() * 6 - blks == start_blk);
// free blocks at distance 1, 2 up to 63 and allocate all of them
  // together using discontiguous alloc.
- zone->reset_marker();
blks = BmapEntry::size() * 2;
int num_bits = 1;
  for (int i = 0; i < zone->size() / BmapEntry::size() - 1; i++) {
-
zone->free_blocks(i * BmapEntry::size(), num_bits);
num_bits++;
}
- zone->reset_marker();
num_bits = 1;
int64_t start_block = 0;
  for (int i = 0; i < zone->size() / BmapEntry::size() - 1; i++) {
allocated = zone->alloc_blocks(num_bits, &start_block);
- debug_assert(num_bits == allocated);
- debug_assert(start_block == i * BmapEntry::size());
+ bmap_test_assert(num_bits == allocated);
+ bmap_test_assert(start_block == i * BmapEntry::size());
num_bits++;
}
delete zone;
// non-conti allocations test
- zone = new BitMapZone(0, total_blocks);
+ zone = new BitMapZone(total_blocks, 0);
+ lock = zone->lock_excl_try();
+ bmap_test_assert(lock);
int64_t blocks[1024] = {0};
for (int i = 0; i < zone->size(); i++) {
allocated = zone->alloc_blocks(1, &start_block);
- debug_assert(allocated == 1);
+ bmap_test_assert(allocated == 1);
}
for (int i = 0; i < zone->size(); i += 2) {
zone->free_blocks(i, 1);
}
- zone->reset_marker();
- allocated = zone->alloc_blocks_dis(zone->size() / 2, blocks);
- debug_assert(allocated == zone->size() / 2);
+ allocated = zone->alloc_blocks_dis(zone->size() / 2, 0, blocks);
+ bmap_test_assert(allocated == zone->size() / 2);
}
-void test_bitmap_alloc() {
+TEST(BitAllocator, test_bmap_alloc)
+{
int64_t total_blocks = 1024 * 4;
int64_t zone_size = 1024;
int64_t allocated = 0;
for (int64_t iter = 0; iter < 4; iter++) {
for (int64_t i = 0; i < total_blocks; i++) {
allocated = alloc->alloc_blocks(1, &start_block);
- debug_assert(allocated == 1);
- debug_assert(start_block == i);
+ bmap_test_assert(allocated == 1);
+ bmap_test_assert(start_block == i);
}
for (int64_t i = 0; i < total_blocks; i++) {
for (int64_t iter = 0; iter < 4; iter++) {
for (int64_t i = 0; i < total_blocks / zone_size; i++) {
allocated = alloc->alloc_blocks(zone_size, &start_block);
- debug_assert(allocated == zone_size);
- debug_assert(start_block == i * zone_size);
+ bmap_test_assert(allocated == zone_size);
+ bmap_test_assert(start_block == i * zone_size);
}
for (int64_t i = 0; i < total_blocks / zone_size; i++) {
}
allocated = alloc->alloc_blocks(1, &start_block);
- debug_assert(allocated == 1);
+ bmap_test_assert(allocated == 1);
allocated = alloc->alloc_blocks(zone_size - 1, &start_block);
- debug_assert(allocated == zone_size - 1);
- debug_assert(start_block == 1);
+ bmap_test_assert(allocated == zone_size - 1);
+ bmap_test_assert(start_block == 1);
allocated = alloc->alloc_blocks(1, &start_block);
- debug_assert(allocated == 1);
+ bmap_test_assert(allocated == 1);
allocated = alloc->alloc_blocks(zone_size, &start_block);
- debug_assert(allocated == zone_size);
- debug_assert(start_block == zone_size * 2);
+ bmap_test_assert(allocated == zone_size);
+ bmap_test_assert(start_block == zone_size * 2);
  // Discontiguous block allocations
delete alloc;
int64_t blocks[2048] = {0};
for (int64_t i = 0; i < alloc->size(); i++) {
allocated = alloc->alloc_blocks(1, &start_block);
- debug_assert(allocated == 1);
+ bmap_test_assert(allocated == 1);
}
for (int i = 0; i < alloc->size(); i += 2) {
alloc->free_blocks(i, 1);
}
allocated = alloc->alloc_blocks_dis(alloc->size()/2, blocks);
- debug_assert(allocated == alloc->size() / 2);
+ bmap_test_assert(allocated == alloc->size() / 2);
allocated = alloc->alloc_blocks_dis(1, blocks);
- debug_assert(allocated == 0);
+ bmap_test_assert(allocated == 0);
- alloc->free_blocks(alloc->size() / 2, 1);
+ alloc->free_blocks(alloc->size()/2, 1);
allocated = alloc->alloc_blocks_dis(1, blocks);
- debug_assert(allocated == 1);
- debug_assert(blocks[0] == alloc->size()/2);
+ bmap_test_assert(allocated == 1);
+ bmap_test_assert(blocks[0] == alloc->size()/2);
+
+ alloc->free_blocks(0, alloc->size());
+ delete alloc;
+
+ // Build a tree with more than 3 levels and exercise allocation and
+ // deallocation in a loop.
+ int64_t alloc_size = 16;
+ total_blocks = pow(BITMAP_SPAN_SIZE, 2) * 4;
+ alloc = new BitAllocator(total_blocks, zone_size, CONCURRENT, false);
+ for (int64_t iter = 0; iter < 3; iter++) {
+ for (int64_t i = 0; i < total_blocks / alloc_size; i++) {
+ allocated = alloc->alloc_blocks(alloc_size, &start_block);
+ bmap_test_assert(allocated == alloc_size);
+ bmap_test_assert(start_block == i * alloc_size);
+ }
+
+ for (int64_t i = 0; i < total_blocks / alloc_size; i++) {
+ alloc->free_blocks(i * alloc_size, alloc_size);
+ }
+ }
delete alloc;
alloc = new BitAllocator(1024, zone_size, CONCURRENT, true);
alloc->free_blocks(1, 1023);
alloc->alloc_blocks(16, &start_block);
+ delete alloc;
+
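+ // Reservation flow: reserve capacity up front, then carve allocations
+ // out of the reservation with alloc_blocks_res().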
+ total_blocks = pow(BITMAP_SPAN_SIZE, 2) * 4;
+ alloc_size = 16;
+ alloc = new BitAllocator(total_blocks, zone_size, CONCURRENT, false);
+ for (int64_t iter = 0; iter < 3; iter++) {
+ for (int64_t i = 0; i < total_blocks / alloc_size; i++) {
+ bmap_test_assert(alloc->reserve_blocks(alloc_size));
+ allocated = alloc->alloc_blocks_res(alloc_size, &start_block);
+ bmap_test_assert(allocated == alloc_size);
+ bmap_test_assert(start_block == i * alloc_size);
+ }
+
+ for (int64_t i = 0; i < total_blocks / alloc_size; i++) {
+ alloc->free_blocks(i * alloc_size, alloc_size);
+ }
+ }
+
+ delete alloc;
}
void
for (i = 0; i < num_blocks - 1; i++) {
if (blocks[i] > blocks[i + 1]) {
wraps++;
- debug_assert(wraps <= 1);
+ bmap_test_assert(wraps <= 1);
}
}
}
void
do_work(BitAllocator *alloc)
{
- int num_iters = 10;
+ int num_iters = 3;
int64_t alloced = 0;
int64_t start_block = -1;
- uint64_t alloc_unit = 1;
int64_t num_blocks = alloc->size() / NUM_THREADS;
int total_alloced = 0;
printf("Allocating in tid %d.\n", my_tid);
for (int i = 0; i < num_blocks; i++) {
alloced = alloc->alloc_blocks(1, &start_block);
- debug_assert(alloced == 1);
+ bmap_test_assert(alloced == 1);
total_alloced++;
allocated_blocks[i] = start_block;
}
return NULL;
}
-void test_bmap_alloc_concurrent()
+TEST(BitAllocator, test_bmap_alloc_concurrent)
{
-
- // Create an allocator
int64_t total_blocks = MAX_BLOCKS;
int64_t zone_size = 1024;
- int64_t allocated = 0;
- int64_t start_block = 0;
pthread_t pthreads[NUM_THREADS] = {0};
- debug_assert(total_blocks <= MAX_BLOCKS);
+ bmap_test_assert(total_blocks <= MAX_BLOCKS);
BitAllocator *alloc = new BitAllocator(total_blocks, zone_size, CONCURRENT);
-
- // Create N threads and each thread allocates at max
printf("Spawning %d threads for parallel test.....\n", NUM_THREADS);
for (int j = 0; j < NUM_THREADS; j++) {
    // Each thread allocates max_blks / num_threads blocks and frees them.
    // Make sure threads always get blocks.
-
    // Do this with discontiguous and contiguous allocations.
-
-
    // Do multithreaded allocation and check allocations are unique.
-
}
-int main()
+int main(int argc, char **argv)
{
- test_bmap_entry();
- test_bmap_iter();
- test_zone_alloc();
- test_bitmap_alloc();
- test_bmap_alloc_concurrent();
-
-
- printf("All tests done : SUCCESS.\n");
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
}