bool BmapEntry::m_bit_mask_init = false;
bmap_mask_vec_t BmapEntry::m_bit_to_mask;
-/*
- * BmapEntityList functions.
- */
-void BmapEntityListIter::init(BitMapAreaList *list, int64_t start_idx, bool wrap)
-{
- m_list = list;
- m_start_idx = start_idx;
- m_cur_idx = m_start_idx;
- m_wrap = wrap;
- m_wrapped = false;
- m_end = false;
-}
-
-BmapEntityListIter::BmapEntityListIter(BitMapAreaList *list, int64_t start_idx)
-{
- init(list, start_idx, false);
-}
-
-BmapEntityListIter::BmapEntityListIter(BitMapAreaList *list, int64_t start_idx, bool wrap)
-{
- init(list, start_idx, wrap);
-}
-
-BitMapArea* BmapEntityListIter::next()
-{
- int64_t cur_idx = m_cur_idx;
-
- if (m_wrapped &&
- cur_idx == m_start_idx) {
- /*
- * End of wrap cycle + 1
- */
- if (!m_end) {
- m_end = true;
- return m_list->get_nth_item(cur_idx);
- }
- return NULL;
- }
- m_cur_idx++;
-
- if (m_cur_idx == m_list->size() &&
- m_wrap) {
- m_cur_idx = 0;
- m_wrapped = true;
- }
- if (cur_idx == m_list->size()) {
- /*
- * End of list
- */
- return NULL;
- }
- alloc_assert(cur_idx < m_list->size());
- return m_list->get_nth_item(cur_idx);
-}
int64_t BmapEntityListIter::index()
{
  return m_cur_idx;
}

int64_t BitMapZone::add_used_blocks(int64_t num_blocks)
{
  return std::atomic_fetch_add(&m_used_blocks, (int32_t) num_blocks) + num_blocks;
}
-int64_t BitMapZone::get_used_blocks()
+/* Intentionally hinted for the sake of BitMapAreaLeaf::child_check_n_lock. */
+inline int64_t BitMapZone::get_used_blocks()
{
return std::atomic_load(&m_used_blocks);
}
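Both accessors lean on C++11 atomics, so the used-blocks counter can be read and advanced without taking the zone lock. Below is a minimal, self-contained sketch of that counter pattern; ZoneCounterSketch is a hypothetical stand-in, not the allocator's actual class layout:

#include <atomic>
#include <cstdint>

struct ZoneCounterSketch {
  std::atomic<int32_t> m_used_blocks{0};
  const int64_t m_size;

  explicit ZoneCounterSketch(const int64_t size) : m_size(size) {}

  // Mirrors the atomic_fetch_add + num_blocks idiom of add_used_blocks():
  // reserve num_blocks and return the updated total in one atomic step.
  int64_t add_used_blocks(const int64_t num_blocks) {
    return std::atomic_fetch_add(&m_used_blocks, (int32_t) num_blocks)
        + num_blocks;
  }

  // Mirrors get_used_blocks(): a plain atomic load, no lock required.
  int64_t get_used_blocks() {
    return std::atomic_load(&m_used_blocks);
  }

  // Mirrors is_exhausted(): the zone is full when every block is used.
  bool is_exhausted() {
    return get_used_blocks() == m_size;
  }
};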
/*
* Check if some search took zone marker to end.
+ *
+ * The inline hint has been added intentionally because of the importance of
+ * this method to BitMapAreaLeaf::child_check_n_lock, and thus to the overall
+ * allocator's performance. Examination of disassemblies produced by GCC 5.4.0
+ * showed that the compiler really needs the hint.
*/
-bool BitMapZone::is_exhausted()
+inline bool BitMapZone::is_exhausted()
{
/* BitMapZone::get_used_blocks operates atomically. No need for lock. */
return get_used_blocks() == size();
}
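Since the change banks on these hints actually being honored, it helps to double-check the compiler's decision. The comment above cites examination of GCC 5.4.0 disassemblies; a lighter-weight option is GCC's -Winline flag, which warns whenever a function declared inline could not be inlined. A hypothetical standalone check (names are illustrative only, not from the allocator):

// Compile with: g++ -O2 -Winline -c winline_check.cc
// GCC reports any 'inline'-declared function it failed to inline at a
// call site, which is a quick sanity check for hints like those above.
#include <atomic>
#include <cstdint>

inline int64_t get_used_blocks_sketch(const std::atomic<int32_t>* const used) {
  return std::atomic_load(used);
}

bool is_exhausted_sketch(const std::atomic<int32_t>* const used,
                         const int64_t size) {
  return get_used_blocks_sketch(used) == size;  // call site GCC may warn about
}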
-bool BitMapAreaLeaf::child_check_n_lock(BitMapZone* const child,
- const int64_t required,
- const bool lock)
+/* Intentionally hinted for the sake of BitMapAreaLeaf::alloc_blocks_dis_int. */
+inline bool BitMapAreaLeaf::child_check_n_lock(BitMapZone* const child,
+ const int64_t required,
+ const bool lock)
{
/* The exhausted check can be performed without acquiring the lock. This
* is because 1) BitMapZone::is_exhausted() actually operates atomically
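The comment above describes an optimistic fast path: reject an exhausted zone via the atomic counter alone, take the zone lock only afterwards, and then re-verify the capacity under the lock so a racing allocation cannot slip in between the two checks. A hedged sketch of that check-then-lock-then-recheck shape follows; the types are simplified stand-ins and the function's real body continues beyond this hunk:

#include <cstdint>
#include <mutex>

// Sketch only: ZoneSketch stands in for BitMapZone.
struct ZoneSketch {
  std::mutex m_lock;
  int64_t used;   // stands in for the atomic counter
  int64_t total;

  bool is_exhausted() { return used == total; }
  int64_t get_used_blocks() { return used; }
  int64_t size() { return total; }
};

bool child_check_n_lock_sketch(ZoneSketch* const child,
                               const int64_t required,
                               const bool lock)
{
  // 1) Cheap, lock-free rejection of fully used zones.
  if (child->is_exhausted()) {
    return false;
  }
  // 2) Acquire (or merely try to acquire) the zone lock.
  if (lock) {
    child->m_lock.lock();
  } else if (!child->m_lock.try_lock()) {
    return false;
  }
  // 3) Re-verify under the lock so a racing allocation can't slip by.
  if (child->get_used_blocks() + required > child->size()) {
    child->m_lock.unlock();
    return false;
  }
  return true;
}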
}
};
+/* Intentionally inlined for the sake of BitMapAreaLeaf::alloc_blocks_dis_int. */
class BmapEntityListIter {
- BitMapAreaList *m_list;
+ BitMapAreaList* m_list;
int64_t m_start_idx;
int64_t m_cur_idx;
bool m_wrap;
bool m_wrapped;
bool m_end;
-public:
-
-  void init(BitMapAreaList *list, int64_t start_idx, bool wrap);
-  BmapEntityListIter(BitMapAreaList *list);
-  BmapEntityListIter(BitMapAreaList *list, bool wrap);
-  BmapEntityListIter(BitMapAreaList *list, int64_t start_idx);
-  BmapEntityListIter(BitMapAreaList *list, int64_t start_idx, bool wrap);
-  BitMapArea *next();
+public:
+  BmapEntityListIter(BitMapAreaList* const list,
+                     const int64_t start_idx,
+                     const bool wrap = false)
+    : m_list(list),
+      m_start_idx(start_idx),
+      m_cur_idx(start_idx),
+      m_wrap(wrap),
+      m_wrapped(false),
+      m_end(false) {
+  }
+
+  BitMapArea* next() {
+    int64_t cur_idx = m_cur_idx;
+
+    if (m_wrapped &&
+        cur_idx == m_start_idx) {
+      /*
+       * End of wrap cycle + 1
+       */
+      if (!m_end) {
+        m_end = true;
+        return m_list->get_nth_item(cur_idx);
+      }
+      return NULL;
+    }
+    m_cur_idx++;
+
+    if (m_cur_idx == m_list->size() &&
+        m_wrap) {
+      m_cur_idx = 0;
+      m_wrapped = true;
+    }
+    if (cur_idx == m_list->size()) {
+      /*
+       * End of list
+       */
+      return NULL;
+    }
+    alloc_assert(cur_idx < m_list->size());
+    return m_list->get_nth_item(cur_idx);
+  }
+
int64_t index();
};
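For reference, the now-inlined next() gives the iterator slightly unusual wrap semantics: started mid-list with wrap enabled, it runs to the end, restarts at index 0, and yields the start element a second time ("end of wrap cycle + 1") before returning NULL. A hypothetical caller, assuming a populated BitMapAreaList:

// Illustrative only: scan children starting from a hint index, the way
// alloc_blocks_dis_int-style callers use the iterator.
void scan_from_hint(BitMapAreaList* const list, const int64_t hint)
{
  BmapEntityListIter iter(list, hint, true /* wrap */);
  for (BitMapArea* area = iter.next(); area != NULL; area = iter.next()) {
    // Try to allocate from 'area' here; break out early on success.
  }
}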