SegmentedOolWriter::alloc_write_iertr::future<>
SegmentedOolWriter::do_write(
Transaction& t,
- std::list<LogicalCachedExtentRef>& extents)
+ std::list<CachedExtentRef>& extents)
{
LOG_PREFIX(SegmentedOolWriter::do_write);
assert(!extents.empty());
auto commit_time = seastar::lowres_system_clock::now();
for (auto it = extents.begin(); it != extents.end();) {
- auto& extent = *it;
+ auto& ext = *it;
+ assert(ext->is_logical());
+ auto extent = ext->template cast<LogicalCachedExtent>();
record_size_t wouldbe_rsize = record.size;
wouldbe_rsize.account_extent(extent->get_bptr().length());
using action_t = journal::RecordSubmitter::action_t;
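As an aside, here is a minimal stand-alone sketch of the downcast pattern this hunk introduces: the list element type is now the base `CachedExtentRef`, so the writer asserts `is_logical()` before an unchecked cast to `LogicalCachedExtent`. The types below are simplified stand-ins (`std::shared_ptr` instead of `boost::intrusive_ptr`), not the seastore definitions.

```cpp
// Minimal stand-ins: std::shared_ptr here plays the role of Ceph's
// intrusive-ptr-based CachedExtentRef; these are not the seastore types.
#include <cassert>
#include <list>
#include <memory>

struct CachedExtent : std::enable_shared_from_this<CachedExtent> {
  virtual ~CachedExtent() = default;
  virtual bool is_logical() const { return false; }
  // Unchecked downcast of the owning pointer, analogous to
  // CachedExtent::cast<T>().
  template <typename T>
  std::shared_ptr<T> cast() {
    return std::static_pointer_cast<T>(shared_from_this());
  }
};
using CachedExtentRef = std::shared_ptr<CachedExtent>;

struct LogicalCachedExtent : CachedExtent {
  bool is_logical() const override { return true; }
};

int main() {
  std::list<CachedExtentRef> extents{std::make_shared<LogicalCachedExtent>()};
  for (auto& ext : extents) {
    // The OOL writers are only ever handed logical extents, so the
    // downcast is guarded by an assert rather than a dynamic_cast.
    assert(ext->is_logical());
    auto extent = ext->cast<LogicalCachedExtent>();
    (void)extent;
  }
}
```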
SegmentedOolWriter::alloc_write_iertr::future<>
SegmentedOolWriter::alloc_write_ool_extents(
Transaction& t,
- std::list<LogicalCachedExtentRef>& extents)
+ std::list<CachedExtentRef>& extents)
{
if (extents.empty()) {
return alloc_write_iertr::now();
ExtentPlacementManager::alloc_paddr_iertr::future<>
ExtentPlacementManager::write_preallocated_ool_extents(
Transaction &t,
- std::list<LogicalCachedExtentRef> extents)
+ std::list<CachedExtentRef> extents)
{
LOG_PREFIX(ExtentPlacementManager::write_preallocated_ool_extents);
DEBUGT("start with {} allocated extents",
t, extents.size());
assert(writer_refs.size());
return seastar::do_with(
- std::map<ExtentOolWriter*, std::list<LogicalCachedExtentRef>>(),
+ std::map<ExtentOolWriter*, std::list<CachedExtentRef>>(),
[this, &t, extents=std::move(extents)](auto& alloc_map) {
for (auto& extent : extents) {
auto writer_ptr = get_writer(
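For illustration, a sketch of the grouping step this hunk sets up: each pre-allocated extent is bucketed under its target writer, and each bucket is later submitted as one batch. `Writer` and `Extent` below are hypothetical placeholders, not the EPM types; in the real code the bucket key comes from `get_writer()`, shown truncated above.

```cpp
// Hypothetical Writer/Extent types standing in for ExtentOolWriter and
// CachedExtentRef.
#include <iostream>
#include <list>
#include <map>
#include <string>

struct Writer { std::string name; };
struct Extent { Writer* writer; };

int main() {
  Writer seg{"segmented"}, rbm{"random-block"};
  std::list<Extent> extents{{&seg}, {&rbm}, {&seg}};

  // Bucket each extent under its target writer; operator[] creates the
  // per-writer list on first use, mirroring the alloc_map above.
  std::map<Writer*, std::list<Extent*>> alloc_map;
  for (auto& extent : extents) {
    alloc_map[extent.writer].push_back(&extent);
  }
  for (auto& [writer, batch] : alloc_map) {
    std::cout << writer->name << ": " << batch.size() << " extent(s)\n";
  }
}
```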
RandomBlockOolWriter::alloc_write_iertr::future<>
RandomBlockOolWriter::alloc_write_ool_extents(
Transaction& t,
- std::list<LogicalCachedExtentRef>& extents)
+ std::list<CachedExtentRef>& extents)
{
if (extents.empty()) {
return alloc_write_iertr::now();
RandomBlockOolWriter::alloc_write_iertr::future<>
RandomBlockOolWriter::do_write(
Transaction& t,
- std::list<LogicalCachedExtentRef>& extents)
+ std::list<CachedExtentRef>& extents)
{
LOG_PREFIX(RandomBlockOolWriter::do_write);
assert(!extents.empty());
extent_len_t offset = 0;
bufferptr bp;
if (can_inplace_rewrite(t, ex)) {
- auto r = ex->get_modified_region();
+ assert(ex->is_logical());
+ auto r = ex->template cast<LogicalCachedExtent>()->get_modified_region();
ceph_assert(r.has_value());
offset = p2align(r->offset, rbm->get_block_size());
extent_len_t len =
if (ex->is_initial_pending()) {
t.mark_allocated_extent_ool(ex);
} else if (can_inplace_rewrite(t, ex)) {
- t.mark_inplace_rewrite_extent_ool(ex);
+ assert(ex->is_logical());
+ t.mark_inplace_rewrite_extent_ool(
+ ex->template cast<LogicalCachedExtent>());
} else {
ceph_abort_msg("impossible");
}
using alloc_write_iertr = trans_iertr<alloc_write_ertr>;
virtual alloc_write_iertr::future<> alloc_write_ool_extents(
Transaction &t,
- std::list<LogicalCachedExtentRef> &extents) = 0;
+ std::list<CachedExtentRef> &extents) = 0;
using close_ertr = base_ertr;
virtual close_ertr::future<> close() = 0;
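For orientation, a compile-time sketch of the interface shape above, assuming simplified types (futures, error types, and `Transaction` are elided): the base class declares the batch write entry point as pure virtual, and the concrete writers pin their overrides with `final`, as in the following hunks.

```cpp
// Compile-time sketch only; real signatures return interruptible futures.
#include <list>

struct CachedExtent {};
using extent_list_t = std::list<CachedExtent*>;

struct ExtentOolWriter {
  virtual ~ExtentOolWriter() = default;
  // Pure virtual batch entry point, as in the header above.
  virtual void alloc_write_ool_extents(extent_list_t& extents) = 0;
  virtual void close() = 0;
};

struct SegmentedOolWriter final : ExtentOolWriter {
  void alloc_write_ool_extents(extent_list_t& extents) final { (void)extents; }
  void close() final {}
};

int main() {
  SegmentedOolWriter writer;
  extent_list_t extents;
  writer.alloc_write_ool_extents(extents);
  writer.close();
}
```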
alloc_write_iertr::future<> alloc_write_ool_extents(
Transaction &t,
- std::list<LogicalCachedExtentRef> &extents) final;
+ std::list<CachedExtentRef> &extents) final;
close_ertr::future<> close() final {
return write_guard.close().then([this] {
private:
alloc_write_iertr::future<> do_write(
Transaction& t,
- std::list<LogicalCachedExtentRef> &extent);
+ std::list<CachedExtentRef> &extents);
alloc_write_ertr::future<> write_record(
Transaction& t,
alloc_write_iertr::future<> alloc_write_ool_extents(
Transaction &t,
- std::list<LogicalCachedExtentRef> &extents) final;
+ std::list<CachedExtentRef> &extents) final;
close_ertr::future<> close() final {
return write_guard.close().then([this] {
private:
alloc_write_iertr::future<> do_write(
Transaction& t,
- std::list<LogicalCachedExtentRef> &extent);
+ std::list<CachedExtentRef> &extents);
RBMCleaner* rb_cleaner;
seastar::gate write_guard;
* usage holds the projected space to reserve
*/
using extents_by_writer_t =
- std::map<ExtentOolWriter*, std::list<LogicalCachedExtentRef>>;
+ std::map<ExtentOolWriter*, std::list<CachedExtentRef>>;
struct dispatch_result_t {
extents_by_writer_t alloc_map;
- std::list<LogicalCachedExtentRef> delayed_extents;
+ std::list<CachedExtentRef> delayed_extents;
io_usage_t usage;
};
*/
alloc_paddr_iertr::future<> write_preallocated_ool_extents(
Transaction &t,
- std::list<LogicalCachedExtentRef> extents);
+ std::list<CachedExtentRef> extents);
seastar::future<> stop_background() {
return background_process.stop_background();
* Specify whether the extent is inline or ool
* return true for inline, false for ool
*/
- bool dispatch_delayed_extent(LogicalCachedExtentRef& extent) {
+ bool dispatch_delayed_extent(CachedExtentRef& extent) {
// TODO: all delayed extents are ool currently
boost::ignore_unused(extent);
return false;
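To show how such a predicate would be consumed (the call site here is illustrative, not the actual EPM code): delayed extents are partitioned into an inline batch and an ool batch based on its return value.

```cpp
#include <list>
#include <memory>

struct CachedExtent {};
using CachedExtentRef = std::shared_ptr<CachedExtent>;

// Mirrors dispatch_delayed_extent() above: every delayed extent is
// currently dispatched ool, hence the unconditional false.
bool dispatch_delayed_extent(CachedExtentRef&) { return false; }

int main() {
  std::list<CachedExtentRef> delayed{
    std::make_shared<CachedExtent>(), std::make_shared<CachedExtent>()};
  std::list<CachedExtentRef> inline_batch, ool_batch;
  for (auto& extent : delayed) {
    // true -> keep the extent inline with the journal record,
    // false -> write it out-of-line.
    (dispatch_delayed_extent(extent) ? inline_batch : ool_batch)
      .push_back(extent);
  }
}
```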
assert(ref->is_logical());
ref->set_paddr(make_delayed_temp_paddr(delayed_temp_offset));
delayed_temp_offset += ref->get_length();
- delayed_alloc_list.emplace_back(ref->cast<LogicalCachedExtent>());
+ delayed_alloc_list.emplace_back(ref);
fresh_block_stats.increment(ref->get_length());
} else if (ref->get_paddr().is_absolute()) {
- pre_alloc_list.emplace_back(ref->cast<LogicalCachedExtent>());
+ pre_alloc_list.emplace_back(ref);
fresh_block_stats.increment(ref->get_length());
} else {
if (likely(ref->get_paddr() == make_record_relative_paddr(0))) {
return fresh_backref_extents;
}
- void mark_delayed_extent_inline(LogicalCachedExtentRef& ref) {
+ void mark_delayed_extent_inline(CachedExtentRef& ref) {
write_set.erase(*ref);
assert(ref->get_paddr().is_delayed());
ref->set_paddr(make_record_relative_paddr(offset),
write_set.insert(*ref);
}
- void mark_delayed_extent_ool(LogicalCachedExtentRef& ref) {
+ void mark_delayed_extent_ool(CachedExtentRef& ref) {
written_ool_block_list.push_back(ref);
}
write_set.insert(*ref);
}
- void mark_allocated_extent_ool(LogicalCachedExtentRef& ref) {
+ void mark_allocated_extent_ool(CachedExtentRef& ref) {
assert(ref->get_paddr().is_absolute());
assert(!ref->is_inline());
written_ool_block_list.push_back(ref);
}
- void mark_inplace_rewrite_extent_ool(LogicalCachedExtentRef& ref) {
+ void mark_inplace_rewrite_extent_ool(LogicalCachedExtentRef ref) {
assert(ref->get_paddr().is_absolute());
assert(!ref->is_inline());
written_inplace_ool_block_list.push_back(ref);
}
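One subtlety in this hunk: the parameter changes from `LogicalCachedExtentRef&` to pass-by-value, because the caller now passes the temporary produced by `cast<LogicalCachedExtent>()`, and a temporary cannot bind to a non-const lvalue reference. A minimal sketch, with `std::shared_ptr` standing in for the intrusive ref type:

```cpp
#include <memory>

struct LogicalCachedExtent {};
// Stand-in for Ceph's intrusive LogicalCachedExtentRef.
using LogicalCachedExtentRef = std::shared_ptr<LogicalCachedExtent>;

// Pass-by-value accepts both lvalues and temporaries.
void by_value(LogicalCachedExtentRef ref) { (void)ref; }
// A non-const lvalue reference would reject the temporary produced by
// cast<LogicalCachedExtent>():
// void by_lvalue_ref(LogicalCachedExtentRef& ref);

int main() {
  by_value(std::make_shared<LogicalCachedExtent>());         // OK
  // by_lvalue_ref(std::make_shared<LogicalCachedExtent>()); // ill-formed
}
```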
auto get_delayed_alloc_list() {
- std::list<LogicalCachedExtentRef> ret;
+ std::list<CachedExtentRef> ret;
for (auto& extent : delayed_alloc_list) {
// delayed extents may be invalidated
if (extent->is_valid()) {
}
auto get_valid_pre_alloc_list() {
- std::list<LogicalCachedExtentRef> ret;
+ std::list<CachedExtentRef> ret;
assert(num_allocated_invalid_extents == 0);
for (auto& extent : pre_alloc_list) {
if (extent->is_valid()) {
uint64_t num_delayed_invalid_extents = 0;
uint64_t num_allocated_invalid_extents = 0;
/// fresh blocks with delayed allocation, may become inline or ool below
- std::list<LogicalCachedExtentRef> delayed_alloc_list;
+ std::list<CachedExtentRef> delayed_alloc_list;
/// fresh blocks with addresses pre-allocated by RBM,
/// should be released upon conflicts, will be added to ool below
- std::list<LogicalCachedExtentRef> pre_alloc_list;
+ std::list<CachedExtentRef> pre_alloc_list;
/// dirty blocks for inplace rewrite with RBM, will be added to inplace ool below
std::list<LogicalCachedExtentRef> pre_inplace_rewrite_list;