uint64_t pos_end, uint64_t length, uint64_t min_length, int mode,
search_ctx_t* ctx)
{
- auto d = CHILD_PER_SLOT;
+ auto d = L1_ENTRIES_PER_SLOT;
ceph_assert((pos_start % d) == 0);
ceph_assert((pos_end % d) == 0);
- uint64_t l0_w = slotset_width * CHILD_PER_SLOT_L0;
+ uint64_t l0_w = slots_per_slotset * L0_ENTRIES_PER_SLOT;
uint64_t l1_pos = pos_start;
const interval_t empty_tail;
return;
}
auto d0 = bits_per_slotset;
- uint64_t l1_w = CHILD_PER_SLOT;
+ uint64_t l1_w = L1_ENTRIES_PER_SLOT;
// this should be aligned with slotset boundaries
ceph_assert(0 == (l0_pos % d0));
ceph_assert(0 == (l0_pos_end % d0));
if (mask_to_apply == L1_ENTRY_NOT_USED) {
mask_to_apply = L1_ENTRY_FULL;
} else if (mask_to_apply != L1_ENTRY_FULL) {
- idx = p2roundup(idx, int64_t(slotset_width));
+ idx = p2roundup(idx, int64_t(slots_per_slotset));
mask_to_apply = L1_ENTRY_PARTIAL;
}
} else if (l0[idx] == all_slot_set) {
if (mask_to_apply == L1_ENTRY_NOT_USED) {
mask_to_apply = L1_ENTRY_FREE;
} else if (mask_to_apply != L1_ENTRY_FREE) {
- idx = p2roundup(idx, int64_t(slotset_width));
+ idx = p2roundup(idx, int64_t(slots_per_slotset));
mask_to_apply = L1_ENTRY_PARTIAL;
}
} else {
// no need to check the current slot set, it's partial
mask_to_apply = L1_ENTRY_PARTIAL;
++idx;
- idx = p2roundup(idx, int64_t(slotset_width));
+ idx = p2roundup(idx, int64_t(slots_per_slotset));
}
- if ((idx % slotset_width) == 0) {
+ if ((idx % slots_per_slotset) == 0) {
ceph_assert(mask_to_apply != L1_ENTRY_NOT_USED);
uint64_t shift = (l1_pos % l1_w) * L1_ENTRY_WIDTH;
slot_t& slot_val = l1[l1_pos / l1_w];
void AllocatorLevel01Loose::_mark_alloc_l0(int64_t l0_pos_start,
int64_t l0_pos_end)
{
- auto d0 = CHILD_PER_SLOT_L0;
+ auto d0 = L0_ENTRIES_PER_SLOT;
int64_t pos = l0_pos_start;
slot_t bits = (slot_t)1 << (l0_pos_start % d0);
uint64_t pos_start, uint64_t pos_end)
{
interval_t res = { 0, 0 };
- uint64_t l0_w = slotset_width * CHILD_PER_SLOT_L0;
+ uint64_t l0_w = slots_per_slotset * L0_ENTRIES_PER_SLOT;
if (unlikely(length <= l0_granularity)) {
search_ctx_t ctx;
uint64_t* allocated,
interval_vector_t* res)
{
- uint64_t d0 = CHILD_PER_SLOT_L0;
- uint64_t d1 = CHILD_PER_SLOT;
+ uint64_t d0 = L0_ENTRIES_PER_SLOT;
+ uint64_t d1 = L1_ENTRIES_PER_SLOT;
- ceph_assert(0 == (l1_pos_start % (slotset_width * d1)));
- ceph_assert(0 == (l1_pos_end % (slotset_width * d1)));
+ ceph_assert(0 == (l1_pos_start % (slots_per_slotset * d1)));
+ ceph_assert(0 == (l1_pos_end % (slots_per_slotset * d1)));
if (min_length != l0_granularity) {
// probably not the most efficient way but
// don't care much about that at the moment
}
}
} else {
- uint64_t l0_w = slotset_width * d0;
+ uint64_t l0_w = slots_per_slotset * d0;
for (auto idx = l1_pos_start / d1;
idx < l1_pos_end / d1 && length > *allocated;
size_t free_seq_cnt = 0;
for (auto slot : l0) {
if (slot == all_slot_set) {
- free_seq_cnt += CHILD_PER_SLOT_L0;
+ free_seq_cnt += L0_ENTRIES_PER_SLOT;
} else if(slot != all_slot_clear) {
size_t pos = 0;
do {
#endif
// fitting into cache line on x86_64
-static const size_t slotset_width = 8; // 8 slots per set
-static const size_t slots_per_slotset = 8;
-static const size_t slotset_bytes = sizeof(slot_t) * slotset_width;
+static const size_t slots_per_slotset = 8; // 8 slots per set
+static const size_t slotset_bytes = sizeof(slot_t) * slots_per_slotset;
static const size_t bits_per_slot = sizeof(slot_t) * 8;
static const size_t bits_per_slotset = slotset_bytes * 8;
static const slot_t all_slot_set = 0xffffffffffffffff;
L1_ENTRY_PARTIAL = 0x01,
L1_ENTRY_NOT_USED = 0x02,
L1_ENTRY_FREE = 0x03,
- CHILD_PER_SLOT = bits_per_slot / L1_ENTRY_WIDTH, // 32
L1_ENTRIES_PER_SLOT = bits_per_slot / L1_ENTRY_WIDTH, //32
- CHILD_PER_SLOT_L0 = bits_per_slot, // 64
+ L0_ENTRIES_PER_SLOT = bits_per_slot, // 64
};
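Editorial note, not part of the patch: a minimal standalone sketch restating the layout arithmetic the renamed constants encode, assuming slot_t is uint64_t (implied by all_slot_set = 0xffffffffffffffff) and L1_ENTRY_WIDTH is 2 bits (implied by the "// 32" comment above); the names below shadow the real header only for illustration.

    // Illustrative sketch only: 8 slots per slotset (one x86_64 cache line),
    // 64 bits per slot, 2-bit L1 entries -> 32 L1 entries / 64 L0 entries per slot.
    #include <cstddef>
    #include <cstdint>

    using slot_t = uint64_t;
    static const size_t slots_per_slotset = 8;
    static const size_t bits_per_slot = sizeof(slot_t) * 8;                        // 64
    static const size_t bits_per_slotset = sizeof(slot_t) * slots_per_slotset * 8; // 512
    static const size_t L1_ENTRY_WIDTH = 2;
    static_assert(bits_per_slot / L1_ENTRY_WIDTH == 32, "L1_ENTRIES_PER_SLOT");
    static_assert(bits_per_slot == 64, "L0_ENTRIES_PER_SLOT");
    static_assert(bits_per_slotset == 512, "bits per slotset");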
uint64_t _children_per_slot() const override
{
- return CHILD_PER_SLOT;
+ return L1_ENTRIES_PER_SLOT;
}
interval_t _get_longest_from_l0(uint64_t pos0, uint64_t pos1,
uint64_t* allocated,
interval_vector_t* res)
{
- uint64_t d0 = CHILD_PER_SLOT_L0;
+ uint64_t d0 = L0_ENTRIES_PER_SLOT;
++l0_dives;
ceph_assert(l0_pos0 < l0_pos1);
ceph_assert(length > *allocated);
- ceph_assert(0 == (l0_pos0 % (slotset_width * d0)));
- ceph_assert(0 == (l0_pos1 % (slotset_width * d0)));
+ ceph_assert(0 == (l0_pos0 % (slots_per_slotset * d0)));
+ ceph_assert(0 == (l0_pos1 % (slots_per_slotset * d0)));
ceph_assert(((length - *allocated) % l0_granularity) == 0);
uint64_t need_entries = (length - *allocated) / l0_granularity;
// capacity to have slot alignment at l1
auto aligned_capacity =
p2roundup((int64_t)capacity,
- int64_t(l1_granularity * slotset_width * _children_per_slot()));
+ int64_t(l1_granularity * slots_per_slotset * _children_per_slot()));
size_t slot_count =
aligned_capacity / l1_granularity / _children_per_slot();
// we use set bit(s) as a marker for (partially) free entry
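As a hypothetical worked example of the l1 sizing above (the 1 MiB l1_granularity and 1000 MiB capacity are invented for illustration, not taken from the patch): the alignment unit is l1_granularity * slots_per_slotset * L1_ENTRIES_PER_SLOT, i.e. 256 * l1_granularity, so capacity is rounded up to that multiple before the l1 vector is sized.

    // Hypothetical numbers, for illustration only; mirrors the rounding above
    // (a plain round-up is used here instead of p2roundup).
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t l1_granularity = 1ull << 20;   // assumed 1 MiB per L1 entry
      const uint64_t entries_per_slot = 32;         // L1_ENTRIES_PER_SLOT
      const uint64_t slots_per_slotset = 8;
      const uint64_t capacity = 1000ull << 20;      // hypothetical 1000 MiB device
      const uint64_t align = l1_granularity * slots_per_slotset * entries_per_slot; // 256 MiB
      const uint64_t aligned_capacity = (capacity + align - 1) / align * align;     // 1 GiB
      const uint64_t slot_count = aligned_capacity / l1_granularity / entries_per_slot; // 32
      assert(aligned_capacity == (1ull << 30) && slot_count == 32);
      return 0;
    }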
void _mark_free_l0(int64_t l0_pos_start, int64_t l0_pos_end)
{
- auto d0 = CHILD_PER_SLOT_L0;
+ auto d0 = L0_ENTRIES_PER_SLOT;
auto pos = l0_pos_start;
slot_t bits = (slot_t)1 << (l0_pos_start % d0);
bool _is_empty_l0(uint64_t l0_pos, uint64_t l0_pos_end)
{
bool no_free = true;
- uint64_t d = slotset_width * CHILD_PER_SLOT_L0;
+ uint64_t d = slots_per_slotset * L0_ENTRIES_PER_SLOT;
ceph_assert(0 == (l0_pos % d));
ceph_assert(0 == (l0_pos_end % d));
- auto idx = l0_pos / CHILD_PER_SLOT_L0;
- auto idx_end = l0_pos_end / CHILD_PER_SLOT_L0;
+ auto idx = l0_pos / L0_ENTRIES_PER_SLOT;
+ auto idx_end = l0_pos_end / L0_ENTRIES_PER_SLOT;
while (idx < idx_end && no_free) {
no_free = l0[idx] == all_slot_clear;
++idx;
bool _is_empty_l1(uint64_t l1_pos, uint64_t l1_pos_end)
{
bool no_free = true;
- uint64_t d = slotset_width * _children_per_slot();
+ uint64_t d = slots_per_slotset * _children_per_slot();
ceph_assert(0 == (l1_pos % d));
ceph_assert(0 == (l1_pos_end % d));
- auto idx = l1_pos / CHILD_PER_SLOT;
- auto idx_end = l1_pos_end / CHILD_PER_SLOT;
+ auto idx = l1_pos / L1_ENTRIES_PER_SLOT;
+ auto idx_end = l1_pos_end / L1_ENTRIES_PER_SLOT;
while (idx < idx_end && no_free) {
no_free = _is_slot_fully_allocated(idx);
++idx;
uint64_t debug_get_allocated(uint64_t pos0 = 0, uint64_t pos1 = 0)
{
if (pos1 == 0) {
- pos1 = l1.size() * CHILD_PER_SLOT;
+ pos1 = l1.size() * L1_ENTRIES_PER_SLOT;
}
auto avail = debug_get_free(pos0, pos1);
return (pos1 - pos0) * l1_granularity - avail;
uint64_t debug_get_free(uint64_t l1_pos0 = 0, uint64_t l1_pos1 = 0)
{
- ceph_assert(0 == (l1_pos0 % CHILD_PER_SLOT));
- ceph_assert(0 == (l1_pos1 % CHILD_PER_SLOT));
+ ceph_assert(0 == (l1_pos0 % L1_ENTRIES_PER_SLOT));
+ ceph_assert(0 == (l1_pos1 % L1_ENTRIES_PER_SLOT));
- auto idx0 = l1_pos0 * slotset_width;
- auto idx1 = l1_pos1 * slotset_width;
+ auto idx0 = l1_pos0 * slots_per_slotset;
+ auto idx1 = l1_pos1 * slots_per_slotset;
if (idx1 == 0) {
idx1 = l0.size();
for (uint64_t i = idx0; i < idx1; ++i) {
auto v = l0[i];
if (v == all_slot_set) {
- res += CHILD_PER_SLOT_L0;
+ res += L0_ENTRIES_PER_SLOT;
} else if (v != all_slot_clear) {
size_t cnt = 0;
#ifdef __GNUC__
uint64_t last_pos = 0;
enum {
- CHILD_PER_SLOT = bits_per_slot, // 64
+ L1_ENTRIES_PER_SLOT = bits_per_slot, // 64
};
uint64_t _children_per_slot() const override
{
- return CHILD_PER_SLOT;
+ return L1_ENTRIES_PER_SLOT;
}
uint64_t _level_granularity() const override
{
l1._init(capacity, _alloc_unit, mark_as_free);
l2_granularity =
- l1._level_granularity() * l1._children_per_slot() * slotset_width;
+ l1._level_granularity() * l1._children_per_slot() * slots_per_slotset;
// capacity to have slot alignment at l2
auto aligned_capacity =
- p2roundup((int64_t)capacity, (int64_t)l2_granularity * CHILD_PER_SLOT);
- size_t elem_count = aligned_capacity / l2_granularity / CHILD_PER_SLOT;
+ p2roundup((int64_t)capacity, (int64_t)l2_granularity * L1_ENTRIES_PER_SLOT);
+ size_t elem_count = aligned_capacity / l2_granularity / L1_ENTRIES_PER_SLOT;
// we use set bit(s) as a marker for (partially) free entry
l2.resize(elem_count, mark_as_free ? all_slot_set : all_slot_clear);
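Continuing the same hypothetical example (taking l1._level_granularity() as the assumed 1 MiB above): l2_granularity spans one full L1 slotset, i.e. 1 MiB * 32 * 8 = 256 MiB, and each 64-bit l2 slot then tracks 64 such ranges, i.e. 16 GiB of capacity. A small sketch of that arithmetic, values invented for illustration:

    // Hypothetical continuation of the sizing example above.
    #include <cstdint>

    constexpr uint64_t l1_granularity = 1ull << 20;               // assumed 1 MiB, as above
    constexpr uint64_t l2_granularity = l1_granularity * 32 * 8;  // 256 MiB per L2 entry
    static_assert(l2_granularity == (1ull << 28), "256 MiB per L2 entry");
    static_assert(l2_granularity * 64 == (1ull << 34), "16 GiB tracked per l2 slot");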
void _mark_l2_allocated(int64_t l2_pos, int64_t l2_pos_end)
{
- auto d = CHILD_PER_SLOT;
+ auto d = L1_ENTRIES_PER_SLOT;
ceph_assert(0 <= l2_pos_end);
ceph_assert((int64_t)l2.size() >= (l2_pos_end / d));
void _mark_l2_free(int64_t l2_pos, int64_t l2_pos_end)
{
- auto d = CHILD_PER_SLOT;
+ auto d = L1_ENTRIES_PER_SLOT;
ceph_assert(0 <= l2_pos_end);
ceph_assert((int64_t)l2.size() >= (l2_pos_end / d));
void _mark_l2_on_l1(int64_t l2_pos, int64_t l2_pos_end)
{
- auto d = CHILD_PER_SLOT;
+ auto d = L1_ENTRIES_PER_SLOT;
ceph_assert(0 <= l2_pos_end);
ceph_assert((int64_t)l2.size() >= (l2_pos_end / d));
- auto idx = l2_pos * slotset_width;
- auto idx_end = l2_pos_end * slotset_width;
+ auto idx = l2_pos * slots_per_slotset;
+ auto idx_end = l2_pos_end * slots_per_slotset;
bool all_allocated = true;
while (idx < idx_end) {
if (!l1._is_slot_fully_allocated(idx)) {
all_allocated = false;
- idx = p2roundup(int64_t(++idx), int64_t(slotset_width));
+ idx = p2roundup(int64_t(++idx), int64_t(slots_per_slotset));
}
else {
++idx;
}
- if ((idx % slotset_width) == 0) {
+ if ((idx % slots_per_slotset) == 0) {
if (all_allocated) {
l2[l2_pos / d] &= ~(slot_t(1) << (l2_pos % d));
}
interval_vector_t* res)
{
uint64_t prev_allocated = *allocated;
- uint64_t d = CHILD_PER_SLOT;
+ uint64_t d = L1_ENTRIES_PER_SLOT;
ceph_assert(isp2(min_length));
ceph_assert(min_length <= l2_granularity);
ceph_assert(max_length == 0 || max_length >= min_length);
max_length = cap;
}
- uint64_t l1_w = slotset_width * l1._children_per_slot();
+ uint64_t l1_w = slots_per_slotset * l1._children_per_slot();
std::lock_guard l(lock);