--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+//
+#include "avlallocator.h"
+#include "crimson/os/seastore/logging.h"
+
+SET_SUBSYS(seastore_device);
+
+namespace crimson::os::seastore {
+
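+// Mark a caller-chosen range as allocated by removing it from the free
+// trees directly, without searching for a fit first.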
+void AvlAllocator::mark_extent_used(rbm_abs_addr addr, size_t size)
+{
+ LOG_PREFIX(AvlAllocator::mark_extent_used);
+ DEBUG("addr: {}, size: {}, avail: {}", addr, size, available_size);
+ _remove_from_tree(addr, size);
+}
+
+void AvlAllocator::init(rbm_abs_addr addr, size_t size, size_t b_size)
+{
+ LOG_PREFIX(AvlAllocator::init);
+ DEBUG("addr: {}, size: {}", addr, size);
+ auto r = new extent_range_t{ addr, addr + size };
+ extent_tree.insert(*r);
+ extent_size_tree.insert(*r);
+ available_size = size;
+ block_size = b_size;
+ total_size = size;
+ base_addr = addr;
+}
+
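+// Remove [start, start + size) from the free trees. Depending on how the
+// range lines up with the containing free extent, that extent is split in
+// two, trimmed on the left or right, or erased entirely.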
+void AvlAllocator::_remove_from_tree(rbm_abs_addr start, size_t size)
+{
+ LOG_PREFIX(AvlAllocator::_remove_from_tree);
+ rbm_abs_addr end = start + size;
+
+ ceph_assert(size != 0);
+ ceph_assert(size <= available_size);
+
+ auto rs = extent_tree.find(extent_range_t{start, end}, extent_tree.key_comp());
+ ceph_assert(rs != extent_tree.end());
+ DEBUG("rs start: {}, rs end: {}", rs->start, rs->end);
+ ceph_assert(rs->start <= start);
+ ceph_assert(rs->end >= end);
+
+ bool left_over = (rs->start != start);
+ bool right_over = (rs->end != end);
+
+ _extent_size_tree_rm(*rs);
+
+ if (left_over && right_over) {
+ auto old_right_end = rs->end;
+ auto insert_pos = rs;
+ ceph_assert(insert_pos != extent_tree.end());
+ ++insert_pos;
+ rs->end = start;
+
+ auto r = new extent_range_t{end, old_right_end};
+ extent_tree.insert_before(insert_pos, *r);
+ extent_size_tree.insert(*r);
+ available_size += r->length();
+ _extent_size_tree_try_insert(*rs);
+ } else if (left_over) {
+ assert(is_aligned(start, block_size));
+ rs->end = start;
+ _extent_size_tree_try_insert(*rs);
+ } else if (right_over) {
+ assert(is_aligned(end, block_size));
+ rs->start = end;
+ _extent_size_tree_try_insert(*rs);
+ } else {
+ extent_tree.erase_and_dispose(rs, dispose_rs{});
+ }
+}
+
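+// Best-fit lookup: walk the size-ordered tree starting from the smallest
+// free extent that can hold `size` and return its start offset. Returns
+// base_addr + total_size (one past the managed range) when nothing fits.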
+rbm_abs_addr AvlAllocator::find_block(size_t size)
+{
+ const auto comp = extent_size_tree.key_comp();
+ auto iter = extent_size_tree.lower_bound(
+ extent_range_t{base_addr, base_addr + size}, comp);
+ for (; iter != extent_size_tree.end(); ++iter) {
+ assert(is_aligned(iter->start, block_size));
+ rbm_abs_addr off = iter->start;
+ if (off + size <= iter->end) {
+ return off;
+ }
+ }
+ return base_addr + total_size;
+}
+
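+// Return [start, start + size) to the free trees, coalescing with the
+// immediately preceding and/or following free extent when they touch.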
+void AvlAllocator::_add_to_tree(rbm_abs_addr start, size_t size)
+{
+ LOG_PREFIX(AvlAllocator::_add_to_tree);
+ ceph_assert(size != 0);
+ DEBUG("addr: {}, size: {}", start, size);
+
+ rbm_abs_addr end = start + size;
+
+ auto rs_after = extent_tree.upper_bound(extent_range_t{start, end},
+ extent_tree.key_comp());
+
+ auto rs_before = extent_tree.end();
+ if (rs_after != extent_tree.begin()) {
+ rs_before = std::prev(rs_after);
+ }
+
+ bool merge_before = (rs_before != extent_tree.end() && rs_before->end == start);
+ bool merge_after = (rs_after != extent_tree.end() && rs_after->start == end);
+
+ if (merge_before && merge_after) {
+ _extent_size_tree_rm(*rs_before);
+ _extent_size_tree_rm(*rs_after);
+ rs_after->start = rs_before->start;
+ extent_tree.erase_and_dispose(rs_before, dispose_rs{});
+ _extent_size_tree_try_insert(*rs_after);
+ } else if (merge_before) {
+ _extent_size_tree_rm(*rs_before);
+ rs_before->end = end;
+ _extent_size_tree_try_insert(*rs_before);
+ } else if (merge_after) {
+ _extent_size_tree_rm(*rs_after);
+ rs_after->start = start;
+ _extent_size_tree_try_insert(*rs_after);
+ } else {
+ auto r = new extent_range_t{start, end};
+ extent_tree.insert(*r);
+ extent_size_tree.insert(*r);
+ available_size += r->length();
+ }
+}
+
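+// Allocate a single contiguous extent. Requests are capped at
+// max_alloc_size, so the returned interval_set holds exactly one interval
+// of min(size, max_alloc_size) bytes.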
+std::optional<interval_set<rbm_abs_addr>> AvlAllocator::alloc_extent(
+ size_t size)
+{
+ LOG_PREFIX(AvlAllocator::alloc_extent);
+ if (available_size < size) {
+ return std::nullopt;
+ }
+ if (extent_size_tree.empty()) {
+ return std::nullopt;
+ }
+ ceph_assert(size > 0);
+ ceph_assert(is_aligned(size, block_size));
+
+ interval_set<rbm_abs_addr> result;
+
+ auto try_to_alloc_block = [this, &result, FNAME] (uint64_t alloc_size) -> uint64_t
+ {
+ rbm_abs_addr start = find_block(alloc_size);
+ if (start != base_addr + total_size) {
+ _remove_from_tree(start, alloc_size);
+ DEBUG("allocate addr: {}, allocate size: {}, available size: {}",
+ start, alloc_size, available_size);
+ result.insert(start, alloc_size);
+ return alloc_size;
+ }
+ return 0;
+ };
+
+ auto alloc = std::min(max_alloc_size, size);
+ uint64_t ret = try_to_alloc_block(alloc);
+ if (ret == 0) {
+ return std::nullopt;
+ }
+
+ assert(!result.empty());
+ assert(result.num_intervals() == 1);
+ for (auto p : result) {
+ INFO("result start: {}, end: {}", p.first, p.first + p.second);
+ }
+ return result;
+}
+
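+// Return a previously allocated region to the allocator; _add_to_tree
+// merges it with any adjacent free extents.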
+void AvlAllocator::free_extent(rbm_abs_addr addr, size_t size)
+{
+ assert(total_size);
+ assert(total_size > available_size);
+ _add_to_tree(addr, size);
+}
+}
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
+// vim: ts=8 sw=2 smarttab expandtab
+
+#pragma once
+
+#include "extent_allocator.h"
+#include "include/ceph_assert.h"
+#include "include/buffer_fwd.h"
+#include "crimson/osd/exceptions.h"
+
+#include "crimson/os/seastore/transaction.h"
+#include <string.h>
+#include "include/buffer.h"
+
+#include <boost/intrusive/avl_set.hpp>
+#include <optional>
+#include <vector>
+
+namespace crimson::os::seastore {
+
+struct extent_range_t {
+ rbm_abs_addr start;
+ rbm_abs_addr end;
+
+ extent_range_t(rbm_abs_addr start, rbm_abs_addr end) :
+ start(start), end(end)
+ {}
+
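+ // Strict ordering by offset for non-overlapping ranges: lhs precedes rhs
+ // only when it ends at or before rhs begins, so find()/upper_bound() on
+ // extent_tree treat any overlapping range as an equivalent key.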
+ struct before_t {
+ template<typename KeyLeft, typename KeyRight>
+ bool operator()(const KeyLeft& lhs, const KeyRight& rhs) const {
+ return lhs.end <= rhs.start;
+ }
+ };
+ boost::intrusive::avl_set_member_hook<> offset_hook;
+
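+ // Ordering by length, with ties broken by start offset so that equal-sized
+ // extents remain distinct keys in extent_size_tree.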
+ struct shorter_t {
+ template<typename KeyType>
+ bool operator()(const extent_range_t& lhs, const KeyType& rhs) const {
+ auto lhs_size = lhs.length();
+ auto rhs_size = rhs.end - rhs.start;
+ if (lhs_size < rhs_size) {
+ return true;
+ } else if (lhs_size > rhs_size) {
+ return false;
+ } else {
+ return lhs.start < rhs.start;
+ }
+ }
+ };
+
+ size_t length() const {
+ return end - start;
+ }
+ boost::intrusive::avl_set_member_hook<> size_hook;
+};
+
+/*
+ * A simplified version of BlueStore's AvlAllocator.
+ */
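+/*
+ * Usage sketch (hypothetical numbers; callers pass device-derived values):
+ *
+ *   AvlAllocator alloc;
+ *   alloc.init(0, 1 << 30, 4096);         // manage 1 GiB in 4 KiB blocks
+ *   auto ext = alloc.alloc_extent(8192);  // two blocks, initially RESERVED
+ *   if (ext) {
+ *     for (auto p : *ext) {
+ *       alloc.free_extent(p.first, p.second);  // p = (offset, length)
+ *     }
+ *   }
+ */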
+class AvlAllocator : public ExtentAllocator {
+public:
+ AvlAllocator(uint64_t block_size = 0, uint64_t available_size = 0) :
+ block_size(block_size), available_size(available_size) {}
+ std::optional<interval_set<rbm_abs_addr>> alloc_extent(
+ size_t size) final;
+
+ void free_extent(rbm_abs_addr addr, size_t size) final;
+ void mark_extent_used(rbm_abs_addr addr, size_t size) final;
+ void init(rbm_abs_addr addr, size_t size, size_t b_size) final;
+
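+ // Disposer for the intrusive trees: boost::intrusive containers do not
+ // own their nodes, so erase_and_dispose()/clear_and_dispose() use this
+ // to free the heap-allocated extent_range_t entries.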
+ struct dispose_rs {
+ void operator()(extent_range_t* p)
+ {
+ delete p;
+ }
+ };
+
+ ~AvlAllocator() {
+ close();
+ }
+
+ void close() final {
+ extent_size_tree.clear();
+ extent_tree.clear_and_dispose(dispose_rs{});
+ total_size = 0;
+ block_size = 0;
+ available_size = 0;
+ base_addr = 0;
+ }
+
+ uint64_t get_available_size() const final {
+ return available_size;
+ }
+
+ uint64_t get_max_alloc_size() const final {
+ return max_alloc_size;
+ }
+
+private:
+ void _add_to_tree(rbm_abs_addr start, size_t size);
+
+ void _extent_size_tree_rm(extent_range_t& r) {
+ ceph_assert(available_size >= r.length());
+ available_size -= r.length();
+ extent_size_tree.erase(r);
+ }
+
+ void _extent_size_tree_try_insert(extent_range_t& r) {
+ extent_size_tree.insert(r);
+ available_size += r.length();
+ }
+
+ void _remove_from_tree(rbm_abs_addr start, size_t size);
+ rbm_abs_addr find_block(size_t size);
+
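+ // Each free extent_range_t is linked into two intrusive AVL trees at once:
+ // extent_tree, ordered by offset, drives removal and neighbor merging,
+ // while extent_size_tree, ordered by length, drives best-fit lookup in
+ // find_block().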
+ using extent_tree_t =
+ boost::intrusive::avl_set<
+ extent_range_t,
+ boost::intrusive::compare<extent_range_t::before_t>,
+ boost::intrusive::member_hook<
+ extent_range_t,
+ boost::intrusive::avl_set_member_hook<>,
+ &extent_range_t::offset_hook>>;
+ extent_tree_t extent_tree;
+
+ using extent_size_tree_t =
+ boost::intrusive::avl_set<
+ extent_range_t,
+ boost::intrusive::compare<extent_range_t::shorter_t>,
+ boost::intrusive::member_hook<
+ extent_range_t,
+ boost::intrusive::avl_set_member_hook<>,
+ &extent_range_t::size_hook>>;
+ extent_size_tree_t extent_size_tree;
+
+ uint64_t block_size = 0;
+ uint64_t available_size = 0;
+ uint64_t total_size = 0;
+ uint64_t base_addr = 0;
+ uint64_t max_alloc_size = 4 << 20;
+};
+
+}
--- /dev/null
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
+// vim: ts=8 sw=2 smarttab expandtab
+
+#pragma once
+
+#include <memory>
+#include <optional>
+
+#include <boost/intrusive_ptr.hpp>
+#include <boost/smart_ptr/intrusive_ref_counter.hpp>
+#include <seastar/core/future.hh>
+#include "crimson/os/seastore/seastore_types.h"
+#include "crimson/os/seastore/random_block_manager.h"
+#include "include/interval_set.h"
+
+namespace crimson::os::seastore {
+
+class ExtentAllocator {
+public:
+ /**
+ * alloc_extent
+ *
+ * Allocate a contiguous region of the given size.
+ * Note that the initial state of the extent is RESERVED after
+ * alloc_extent(); see rbm_extent_state_t in random_block_manager.h.
+ *
+ * @param size allocation size in bytes
+ * @return nullopt or the allocated address range (rbm_abs_addr, len)
+ */
+ virtual std::optional<interval_set<rbm_abs_addr>> alloc_extent(
+ size_t size) = 0;
+ /**
+ * free_extent
+ *
+ * Free the given region.
+ *
+ * @param addr start address (rbm_abs_addr)
+ * @param size size of the region in bytes
+ */
+ virtual void free_extent(rbm_abs_addr addr, size_t size) = 0;
+ /**
+ * mark_extent_used
+ *
+ * Mark the given region as used without going through alloc_extent().
+ *
+ * @param addr start address (rbm_abs_addr)
+ * @param size size of the region in bytes
+ */
+ virtual void mark_extent_used(rbm_abs_addr addr, size_t size) = 0;
+ /**
+ * init
+ *
+ * Initialize the address space the ExtentAllocator will manage.
+ *
+ * @param addr start address (rbm_abs_addr)
+ * @param size total size of the managed space in bytes
+ * @param b_size block size in bytes
+ */
+ virtual void init(rbm_abs_addr addr, size_t size, size_t b_size) = 0;
+ virtual uint64_t get_available_size() const = 0;
+ virtual uint64_t get_max_alloc_size() const = 0;
+ virtual void close() = 0;
+ virtual ~ExtentAllocator() {}
+};
+using ExtentAllocatorRef = std::unique_ptr<ExtentAllocator>;
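+// Typical call sequence (sketch; AvlAllocator is one concrete implementation):
+//   ExtentAllocatorRef alloc = std::make_unique<AvlAllocator>();
+//   alloc->init(base, size, block_size);
+//   auto ext = alloc->alloc_extent(len);  // nullopt when nothing fits
+//   ...
+//   alloc->free_extent(addr, len);
+//   alloc->close();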
+
+
+}