#include <math.h>
#define debug_assert assert
-#define MIN(x, y) ((x) > (y) ? (y) : (x))
#define MAX_INT16 ((uint16_t) -1 >> 1)
#define MAX_INT32 ((uint32_t) -1 >> 1)
void BitAllocator::init_check(int64_t total_blocks, int64_t zone_size_block,
bmap_alloc_mode_t mode, bool def, bool stats_on)
{
- int64_t total_zones = 0;
+ int64_t unaligned_blocks = 0;
if (mode != SERIAL && mode != CONCURRENT) {
debug_assert(0);
debug_assert(0);
}
- truncated_blocks = total_blocks - (total_blocks / zone_size_block) * zone_size_block;
- total_blocks = (total_blocks / zone_size_block) * zone_size_block;
- total_zones = total_blocks / zone_size_block;
+ unaligned_blocks = total_blocks % zone_size_block;
+ total_blocks = ROUND_UP_TO(total_blocks, zone_size_block);
- debug_assert(total_blocks > 0);
- debug_assert(total_zones > 0);
m_alloc_mode = mode;
m_is_stats_on = stats_on;
if (m_is_stats_on) {
pthread_rwlock_init(&m_rw_lock, NULL);
init(total_blocks, 0, def);
+ if (!def && unaligned_blocks) {
+ /*
+ * Mark extra padded blocks used from the beginning.
+ */
+ set_blocks_used(total_blocks - (zone_size_block - unaligned_blocks),
+ (zone_size_block - unaligned_blocks));
+ }
}
void BitAllocator::lock_excl()
return;
}
+ debug_assert(start_block + num_blocks <= size());
if (is_stats_on()) {
m_stats->add_free_calls(1);
m_stats->add_freed(num_blocks);
return;
}
+ debug_assert(start_block + num_blocks <= size());
lock_shared();
serial_lock();
set_blocks_used_int(start_block, num_blocks);
#undef dout_prefix
#define dout_prefix *_dout << "bitmapalloc:"
-#define NEXT_MULTIPLE(x, m) (!(x) ? 0: (((((x) - 1) / (m)) + 1) * (m)))
BitMapAllocator::BitMapAllocator(int64_t device_size)
: m_num_uncommitted(0),
dout(10) << __func__ <<" instance "<< (uint64_t) this <<
" offset " << offset << " length " << length << dendl;
- offset = NEXT_MULTIPLE(offset, m_block_size);
-
- // bitallocator::init may decrease the size of blkdev.
- uint64_t total_size = m_bit_alloc->size() * m_block_size;
- if (offset + length > total_size) {
- assert(offset + length < total_size + m_bit_alloc->get_truncated_blocks() * m_block_size);
- length -= (offset + length) - total_size;
- }
-
- insert_free(offset, (length / m_block_size) * m_block_size);
+ insert_free(ROUND_UP_TO(offset, m_block_size),
+ (length / m_block_size) * m_block_size);
}
void BitMapAllocator::init_rm_free(uint64_t offset, uint64_t length)
alloc->free_blocks(0, alloc->size());
delete alloc;
+ // unaligned zones
+ total_blocks = 1024 * 2 + 11;
+ alloc = new BitAllocator(total_blocks, zone_size, CONCURRENT);
+
+ for (int64_t iter = 0; iter < 4; iter++) {
+ for (int64_t i = 0; i < total_blocks; i++) {
+ allocated = alloc->alloc_blocks(1, &start_block);
+ bmap_test_assert(allocated == 1);
+ bmap_test_assert(start_block == i);
+ }
+
+ for (int64_t i = 0; i < total_blocks; i++) {
+ alloc->free_blocks(i, 1);
+ }
+ }
+
// Make three > 3 levels tree and check allocations and dealloc
// in a loop
int64_t alloc_size = 16;