get_by_ext(efforts.retire_by_ext,
i->get_type()).increment(i->get_length());
retire_extent(i);
+ if (i->backend_type == device_type_t::RANDOM_BLOCK) {
+ paddr_t paddr = i->get_paddr();
+ rbm_alloc_delta_t delta;
+ delta.op = rbm_alloc_delta_t::op_types_t::CLEAR;
+ delta.alloc_blk_ranges.push_back(std::make_pair(paddr, i->get_length()));
+ t.add_rbm_alloc_info_blocks(delta);
+ }
}
record.extents.reserve(t.inline_block_list.size());
});
}
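// For reference, a minimal sketch of the delta type the hunk above relies on,
// reconstructed from its usage here (alloc_blk_ranges, op, op_types_t::CLEAR);
// the authoritative definition lives upstream (presumably seastore_types.h,
// alongside the denc registrations further down) and also carries the DENC
// body that the encode() call below depends on:
struct rbm_alloc_delta_t {
  enum class op_types_t : uint8_t {
    NONE = 0,
    SET = 1,    // assumed allocation counterpart to CLEAR
    CLEAR = 2
  };
  // ranges of freed (or allocated) blocks: (start paddr, length in bytes)
  std::vector<std::pair<paddr_t, size_t>> alloc_blk_ranges;
  op_types_t op = op_types_t::NONE;
};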
+ for (const auto &b : t.get_rbm_alloc_info_blocks()) {
+ bufferlist bl;
+ encode(b, bl);
+ delta_info_t delta;
+ delta.type = extent_types_t::RBM_ALLOC_INFO;
+ delta.bl = bl;
+ record.deltas.push_back(delta);
+ }
+
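// The consuming side is outside this diff: on journal replay, something along
// these lines would decode an RBM_ALLOC_INFO delta and hand the freed ranges
// back to the block manager. A sketch only -- replay_rbm_alloc_delta is a
// hypothetical name, and decode() relies on the denc registration added
// further down:
void replay_rbm_alloc_delta(const delta_info_t &delta) {
  ceph_assert(delta.type == extent_types_t::RBM_ALLOC_INFO);
  rbm_alloc_delta_t d;
  auto p = delta.bl.cbegin();
  decode(d, p);
  for (const auto &[paddr, len] : d.alloc_blk_ranges) {
    // for op == CLEAR, mark [paddr, paddr + len) free in the allocation bitmap
  }
}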
for (auto &i: t.ool_block_list) {
ceph_assert(i->is_valid());
DEBUGT("fresh ool block {}", t, *i);
#include "crimson/os/seastore/cached_extent.h"
#include "crimson/os/seastore/root_block.h"
#include "crimson/os/seastore/segment_cleaner.h"
+#include "crimson/os/seastore/random_block_manager.h"
namespace crimson::os::seastore {
>;
virtual allocate_ertr::future<> alloc_extent(Transaction &t, size_t size) = 0; // allocate blocks and record them on the transaction
- using free_block_ertr = crimson::errorator<
- crimson::ct_error::input_output_error,
- crimson::ct_error::invarg
- >;
- // TODO: will include trim if necessary
- virtual free_block_ertr::future<> free_extent(Transaction &t, blk_paddr_t from, size_t len) = 0;
-
using abort_allocation_ertr = crimson::errorator<
crimson::ct_error::input_output_error,
crimson::ct_error::invarg
>;
}
-NVMeManager::free_block_ertr::future<> NVMeManager::free_extent(
- Transaction &t, blk_paddr_t from, size_t len)
-{
- return free_block_ertr::now();
-}
-
-NVMeManager::free_block_ertr::future<> NVMeManager::add_free_extent(
+void NVMeManager::add_free_extent(
std::vector<rbm_alloc_delta_t>& v, blk_paddr_t from, size_t len)
{
ceph_assert(!(len % super.block_size));
rbm_alloc_delta_t alloc_info;
paddr_t paddr = convert_blk_paddr_to_paddr(
  from, super.block_size, super.blocks_per_segment, 0);
alloc_info.alloc_blk_ranges.push_back(std::make_pair(paddr, len));
alloc_info.op = rbm_alloc_delta_t::op_types_t::CLEAR;
v.push_back(alloc_info);
- return free_block_ertr::now();
}
NVMeManager::write_ertr::future<> NVMeManager::rbm_sync_block_bitmap_by_range(
* add a range of free blocks to the transaction
*
*/
- // TODO: will include trim if necessary
- free_block_ertr::future<> free_extent(
- Transaction &t, blk_paddr_t from, size_t len) final;
abort_allocation_ertr::future<> abort_allocation(Transaction &t) final;
write_ertr::future<> complete_allocation(Transaction &t) final;
write_ertr::future<> write(blk_paddr_t addr, bufferlist &bl);
write_ertr::future<> sync_allocation(
std::vector<rbm_alloc_delta_t>& alloc_blocks);
- free_block_ertr::future<> add_free_extent(
+ void add_free_extent(
std::vector<rbm_alloc_delta_t>& v, blk_paddr_t from, size_t len);
uint32_t get_blocks_per_segment() const final {
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::record_header_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::extent_info_t)
WRITE_CLASS_DENC_BOUNDED(crimson::os::seastore::segment_header_t)
+WRITE_CLASS_DENC(crimson::os::seastore::rbm_alloc_delta_t) // not _BOUNDED: alloc_blk_ranges is variable-length
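// With the registration above, the free-standing encode()/decode() overloads
// used in the journal hunk earlier become available for rbm_alloc_delta_t.
// A minimal round trip for illustration; rbm_alloc_delta_round_trip is a
// hypothetical helper, not part of this patch:
inline void rbm_alloc_delta_round_trip(const rbm_alloc_delta_t &in) {
  ceph::bufferlist bl;
  encode(in, bl);
  rbm_alloc_delta_t out;
  auto it = bl.cbegin();
  decode(out, it);
}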
template<>
struct denc_traits<crimson::os::seastore::device_type_t> {
tree_stats_t& get_lba_tree_stats() {
return lba_tree_stats;
}
+ void add_rbm_alloc_info_blocks(const rbm_alloc_delta_t &d) {
+   rbm_alloc_info_blocks.push_back(d);
+ }
+ void clear_rbm_alloc_info_blocks() {
+   rbm_alloc_info_blocks.clear();
+ }
+ const auto &get_rbm_alloc_info_blocks() const {
+   return rbm_alloc_info_blocks;
+ }
struct ool_write_stats_t {
io_stat_t extents;
on_destruct_func_t on_destruct;
const src_t src;
+
+ std::vector<rbm_alloc_delta_t> rbm_alloc_info_blocks;
};
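// Pulling the new Transaction plumbing together (a condensed restatement of
// the hunks above, not new behavior; queue_retired_range is a hypothetical
// helper name):
inline void queue_retired_range(Transaction &t, CachedExtentRef extent) {
  rbm_alloc_delta_t delta;
  delta.op = rbm_alloc_delta_t::op_types_t::CLEAR;
  delta.alloc_blk_ranges.push_back(
    std::make_pair(extent->get_paddr(), extent->get_length()));
  t.add_rbm_alloc_info_blocks(delta);
  // record submission then drains get_rbm_alloc_info_blocks() into
  // RBM_ALLOC_INFO journal deltas, and clear_rbm_alloc_info_blocks()
  // presumably runs when the transaction resets for retry.
}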
using TransactionRef = Transaction::Ref;
logger().debug("free_extent: start {} len {}", off * DEFAULT_BLOCK_SIZE,
len * DEFAULT_BLOCK_SIZE);
rbm_manager->add_free_extent(t.allocated_blocks, off * DEFAULT_BLOCK_SIZE,
- len * DEFAULT_BLOCK_SIZE).unsafe_get();
+ len * DEFAULT_BLOCK_SIZE);
}
}