namespace crimson::os::seastore {
+// Result of a single RBM block allocation: one contiguous run of blocks,
+// described by its starting physical address and its length in bytes.
+// alloc_extents() returns a list of these when the request had to be
+// satisfied with non-contiguous extents.
+struct alloc_paddr_result {
+ paddr_t start;
+ extent_len_t len;
+};
+
struct rbm_shard_info_t {
std::size_t size = 0;
uint64_t start_offset = 0;
// allocator, return start addr of allocated blocks
virtual paddr_t alloc_extent(size_t size) = 0;
+ using allocate_ret_bare = std::list<alloc_paddr_result>;
+ using allo_extents_ret = allocate_ertr::future<allocate_ret_bare>;
+ virtual allocate_ret_bare alloc_extents(size_t size) = 0;
+
virtual void mark_space_used(paddr_t paddr, size_t len) = 0;
virtual void mark_space_free(paddr_t paddr, size_t len) = 0;
return total_size;
}
+// Locate a free extent for a request of `size` bytes.
+//
+// On success, stores the extent's start address in `start` and returns the
+// usable length: `size` when some free extent is large enough, otherwise
+// the length of the largest free extent (best effort — the caller loops
+// until the full request is satisfied).  Returns 0 when the free tree is
+// empty; callers assert on a zero return.
+extent_len_t AvlAllocator::find_block(
+  size_t size,
+  rbm_abs_addr &start)
+{
+  // The largest free extent lives at the back of the size-ordered tree.
+  auto p = extent_size_tree.rbegin();
+  if (p == extent_size_tree.rend()) {
+    // Empty tree: previously this fell through and dereferenced rend()
+    // in release builds (the assert compiles out) — undefined behavior.
+    return 0;
+  }
+  uint64_t max_size = p->end - p->start;
+  assert(max_size);
+  if (max_size <= size) {
+    // No single extent can cover the request; hand out the biggest one
+    // and let the caller come back for the remainder.
+    start = p->start;
+    return max_size;
+  }
+
+  // Otherwise take the smallest free extent whose length is >= size.
+  const auto comp = extent_size_tree.key_comp();
+  auto iter = extent_size_tree.lower_bound(
+    extent_range_t{base_addr, base_addr + size}, comp);
+  ceph_assert(iter != extent_size_tree.end());
+  ceph_assert(is_aligned(iter->start, block_size));
+  ceph_assert(size <= iter->length());
+  start = iter->start;
+  return size;
+}
+
+
void AvlAllocator::_add_to_tree(rbm_abs_addr start, rbm_abs_addr size)
{
LOG_PREFIX(AvlAllocator::_add_to_tree);
return result;
}
+// Allocate `size` bytes as one or more (possibly non-contiguous) extents.
+//
+// Returns the set of allocated intervals, or std::nullopt when the
+// allocator does not have `size` bytes available.  `size` must be
+// positive and block-aligned.  When `detailed` accounting is enabled,
+// every allocated interval is also recorded in reserved_extent_tracker.
+std::optional<interval_set<rbm_abs_addr>> AvlAllocator::alloc_extents(
+  size_t size)
+{
+  LOG_PREFIX(AvlAllocator::alloc_extents);
+  if (available_size < size) {
+    return std::nullopt;
+  }
+  if (extent_size_tree.empty()) {
+    return std::nullopt;
+  }
+  ceph_assert(size > 0);
+  ceph_assert(is_aligned(size, block_size));
+
+  interval_set<rbm_abs_addr> result;
+
+  // Carve `alloc_size` bytes out of the free tree, possibly as several
+  // smaller extents when no single free extent is large enough
+  // (find_block returns at most the largest free extent per call).
+  auto try_to_alloc_block = [this, &result, FNAME] (uint64_t alloc_size)
+  {
+    while (alloc_size) {
+      rbm_abs_addr start = 0;
+      extent_len_t len = find_block(alloc_size, start);
+      ceph_assert(len);
+      _remove_from_tree(start, len);
+      DEBUG("allocate addr: {}, allocate size: {}, available size: {}",
+        start, len, available_size);
+      result.insert(start, len);
+      alloc_size -= len;
+    }
+  };
+
+  // Allocate in chunks of at most max_alloc_size until the whole request
+  // is satisfied.  Previously only min(max_alloc_size, size) bytes were
+  // allocated, so any request larger than max_alloc_size came back short
+  // and tripped the caller's `size == len` assertion.
+  size_t remaining = size;
+  while (remaining) {
+    const auto alloc = std::min(max_alloc_size, remaining);
+    try_to_alloc_block(alloc);
+    remaining -= alloc;
+  }
+
+  assert(!result.empty());
+  for (const auto &p : result) {
+    INFO("result start: {}, end: {}", p.first, p.first + p.second);
+    if (detailed) {
+      assert(!reserved_extent_tracker.contains(p.first, p.second));
+      reserved_extent_tracker.insert(p.first, p.second);
+    }
+  }
+  return result;
+}
+
void AvlAllocator::free_extent(rbm_abs_addr addr, size_t size)
{
assert(total_size);
detailed(detailed) {}
std::optional<interval_set<rbm_abs_addr>> alloc_extent(
size_t size) final;
+ std::optional<interval_set<rbm_abs_addr>> alloc_extents(
+ size_t size) final;
void free_extent(rbm_abs_addr addr, size_t size) final;
void mark_extent_used(rbm_abs_addr addr, size_t size) final;
void _remove_from_tree(rbm_abs_addr start, rbm_abs_addr size);
rbm_abs_addr find_block(size_t size);
+ extent_len_t find_block(size_t size, rbm_abs_addr &start);
using extent_tree_t =
boost::intrusive::avl_set<
return paddr;
}
+// Allocate `size` bytes from the underlying allocator and translate each
+// allocated interval into a device-relative {paddr, length} entry.
+// An empty list signals that the allocator could not satisfy the request.
+BlockRBManager::allocate_ret_bare
+BlockRBManager::alloc_extents(size_t size)
+{
+  LOG_PREFIX(BlockRBManager::alloc_extents);
+  assert(allocator);
+  auto allocated = allocator->alloc_extents(size);
+  if (!allocated) {
+    return {};
+  }
+  allocate_ret_bare ret;
+  size_t total = 0;
+  for (auto it = allocated->begin(); it != allocated->end(); ++it) {
+    const auto ext_len = it.get_len();
+    paddr_t paddr = convert_abs_addr_to_paddr(
+      it.get_start(),
+      device->get_device_id());
+    DEBUG("allocated addr: {}, size: {}, requested size: {}",
+      paddr, ext_len, size);
+    ret.push_back(
+      {std::move(paddr),
+       static_cast<extent_len_t>(ext_len)});
+    total += ext_len;
+  }
+  // The allocator must hand back exactly the requested amount.
+  ceph_assert(size == total);
+  return ret;
+}
+
void BlockRBManager::complete_allocation(
paddr_t paddr, size_t size)
{