From 6bbc3f9b9947575d91748bba849d9e7a1e7d27e5 Mon Sep 17 00:00:00 2001
From: Alex Ainscow
Date: Wed, 9 Apr 2025 13:49:49 +0100
Subject: [PATCH] osd: Make EC alignment independent of page size.

Code which manipulates full pages is often faster. To exploit this, the
optimised EC code was written to deal with 4k alignment wherever possible.
When inputs are not aligned, they are quickly aligned to 4k.

Not all architectures use 4k page sizes. Some Power architectures, for
example, have a 64k page size. In such situations, using 64k page alignment
is unlikely to provide any performance boost; indeed, it is likely to hurt
performance significantly.

As such, EC has been changed to maintain its own internal alignment (4k),
which can be configured. This has the added advantage that we can
potentially tweak this value in the future.

Signed-off-by: Alex Ainscow
---
 src/osd/ECBackend.cc | 2 +-
 src/osd/ECCommon.cc | 2 +-
 src/osd/ECTransaction.cc | 22 +++++-----
 src/osd/ECUtil.cc | 42 +++++++++----------
 src/osd/ECUtil.h | 30 +++++++-------
 src/test/osd/TestECBackend.cc | 62 ++++++++++++++---------------
 src/test/osd/test_ec_transaction.cc | 34 ++++++++--------
 src/test/osd/test_extent_cache.cc | 18 ++++-----
 8 files changed, 106 insertions(+), 106 deletions(-)

diff --git a/src/osd/ECBackend.cc b/src/osd/ECBackend.cc index d219de97252a0..448d2f7d598a8 100644 --- a/src/osd/ECBackend.cc +++ b/src/osd/ECBackend.cc @@ -373,7 +373,7 @@ void ECBackend::RecoveryBackend::handle_recovery_read_complete( } } - uint64_t aligned_size = ECUtil::align_page_next(op.obc->obs.oi.size); + uint64_t aligned_size = ECUtil::align_next(op.obc->obs.oi.size); int r = op.returned_data->decode(ec_impl, shard_want_to_read, aligned_size); ceph_assert(r == 0); diff --git a/src/osd/ECCommon.cc b/src/osd/ECCommon.cc index 56fe62457eeb8..e63d310a911af 100644 --- a/src/osd/ECCommon.cc +++ b/src/osd/ECCommon.cc @@ -280,7 +280,7 @@ int ECCommon::ReadPipeline::get_min_avail_to_read_shards( extents.union_of(read_request.shard_want_to_read.at(shard)); } - extents.align(CEPH_PAGE_SIZE); + extents.align(EC_ALIGN_SIZE); if (read_mask.contains(shard)) { shard_read.extents.intersection_of(extents, read_mask.at(shard)); } diff --git a/src/osd/ECTransaction.cc b/src/osd/ECTransaction.cc index 519ee12533b3e..ebc0561410215 100644 --- a/src/osd/ECTransaction.cc +++ b/src/osd/ECTransaction.cc @@ -149,17 +149,17 @@ ECTransaction::WritePlanObj::WritePlanObj( /* Calculate any non-aligned pages. 
These need to be read and written */ extent_set aligned_ro_writes(unaligned_ro_writes); - aligned_ro_writes.align(CEPH_PAGE_SIZE); + aligned_ro_writes.align(EC_ALIGN_SIZE); extent_set partial_page_ro_writes(aligned_ro_writes); partial_page_ro_writes.subtract(unaligned_ro_writes); - partial_page_ro_writes.align(CEPH_PAGE_SIZE); + partial_page_ro_writes.align(EC_ALIGN_SIZE); extent_set write_superset; for (auto &&[off, len] : unaligned_ro_writes) { sinfo.ro_range_to_shard_extent_set_with_superset( off, len, will_write, write_superset); } - write_superset.align(CEPH_PAGE_SIZE); + write_superset.align(EC_ALIGN_SIZE); shard_id_set writable_parity_shards = shard_id_set::intersection(sinfo.get_parity_shards(), writable_shards); if (write_superset.size() > 0) { @@ -180,10 +180,10 @@ ECTransaction::WritePlanObj::WritePlanObj( reads.intersection_of(read_mask); do_parity_delta_write = false; } else { - will_write.align(CEPH_PAGE_SIZE); + will_write.align(EC_ALIGN_SIZE); ECUtil::shard_extent_set_t pdw_reads(will_write); - sinfo.ro_size_to_read_mask(ECUtil::align_page_next(orig_size), read_mask); + sinfo.ro_size_to_read_mask(ECUtil::align_next(orig_size), read_mask); /* Next we need to add the reads required for a conventional write */ for (auto shard : sinfo.get_data_shards()) { @@ -542,7 +542,7 @@ ECTransaction::Generate::Generate(PGTransaction &t, } if (entry && plan.orig_size < plan.projected_size) { - entry->mod_desc.append(ECUtil::align_page_next(plan.orig_size)); + entry->mod_desc.append(ECUtil::align_next(plan.orig_size)); } if (op.is_delete()) { @@ -623,8 +623,8 @@ void ECTransaction::Generate::truncate() { auto &t = transactions.at(shard); uint64_t start = eset.range_start(); - uint64_t start_align_prev = ECUtil::align_page_prev(start); - uint64_t start_align_next = ECUtil::align_page_next(start); + uint64_t start_align_prev = ECUtil::align_prev(start); + uint64_t start_align_next = ECUtil::align_next(start); uint64_t end = eset.range_end(); t.touch( coll_t(spg_t(pgid, shard)), @@ -695,12 +695,12 @@ void ECTransaction::Generate::overlay_writes() { void ECTransaction::Generate::appends_and_clone_ranges() { extent_set clone_ranges = plan.will_write.get_extent_superset(); - uint64_t clone_max = ECUtil::align_page_next(plan.orig_size); + uint64_t clone_max = ECUtil::align_next(plan.orig_size); if (op.delete_first) { clone_max = 0; } else if (op.truncate && op.truncate->first < clone_max) { - clone_max = ECUtil::align_page_next(op.truncate->first); + clone_max = ECUtil::align_next(op.truncate->first); } ECUtil::shard_extent_set_t cloneable_range(sinfo.get_k_plus_m()); sinfo.ro_size_to_read_mask(clone_max, cloneable_range); @@ -820,7 +820,7 @@ void ECTransaction::Generate::written_and_present_shards() { entry->mod_desc.rollback_extents( entry->version.version, rollback_extents, - ECUtil::align_page_next(plan.orig_size), + ECUtil::align_next(plan.orig_size), rollback_shards); } if (entry->written_shards.size() == sinfo.get_k_plus_m()) { diff --git a/src/osd/ECUtil.cc b/src/osd/ECUtil.cc index 9baf951e8b6ab..1fd5c33c8e191 100644 --- a/src/osd/ECUtil.cc +++ b/src/osd/ECUtil.cc @@ -161,7 +161,7 @@ void ECUtil::stripe_info_t::trim_shard_extent_set_for_ro_offset( ro_offset, raw_shard_id_t(0)); for (auto &&iter = shard_extent_set.begin(); iter != shard_extent_set.end() ;) { - iter->second.erase_after(align_page_next(shard_offset)); + iter->second.erase_after(align_next(shard_offset)); if (iter->second.empty()) iter = shard_extent_set.erase(iter); else ++iter; } @@ -179,7 +179,7 @@ void 
ECUtil::stripe_info_t::ro_size_to_stripe_aligned_read_mask( void ECUtil::stripe_info_t::ro_size_to_read_mask( uint64_t ro_size, shard_extent_set_t &shard_extent_set) const { - ro_range_to_shard_extent_set_with_parity(0, align_page_next(ro_size), + ro_range_to_shard_extent_set_with_parity(0, align_next(ro_size), shard_extent_set); } @@ -187,9 +187,9 @@ void ECUtil::stripe_info_t::ro_size_to_zero_mask( uint64_t ro_size, shard_extent_set_t &shard_extent_set) const { // There should never be any zero padding on the parity. - ro_range_to_shard_extent_set(align_page_next(ro_size), + ro_range_to_shard_extent_set(align_next(ro_size), ro_offset_to_next_stripe_ro_offset(ro_size) - - align_page_next(ro_size), + align_next(ro_size), shard_extent_set); trim_shard_extent_set_for_ro_offset(ro_size, shard_extent_set); } @@ -470,7 +470,7 @@ void shard_extent_map_t::insert_parity_buffers() { continue; } bufferlist bl; - bl.push_back(buffer::create_aligned(length, CEPH_PAGE_SIZE)); + bl.push_back(buffer::create_aligned(length, EC_ALIGN_SIZE)); extent_maps[shard].insert(offset, length, bl); } } @@ -503,7 +503,7 @@ int shard_extent_map_t::_encode(const ErasureCodeInterfaceRef &ec_impl) { } if (rebuild_req) { - pad_and_rebuild_to_page_align(); + pad_and_rebuild_to_ec_align(); return _encode(ec_impl); } @@ -544,8 +544,8 @@ int shard_extent_map_t::encode_parity_delta( shard_extent_map_t &old_sem) { shard_id_set out_set = sinfo->get_parity_shards(); - pad_and_rebuild_to_page_align(); - old_sem.pad_and_rebuild_to_page_align(); + pad_and_rebuild_to_ec_align(); + old_sem.pad_and_rebuild_to_ec_align(); for (auto data_shard : sinfo->get_data_shards()) { shard_extent_map_t s(sinfo); @@ -568,9 +568,9 @@ int shard_extent_map_t::encode_parity_delta( shard_id_map &parity_shards = iter.get_out_bufferptrs(); unsigned int size = iter.get_length(); - ceph_assert(size % 4096 == 0); + ceph_assert(size % EC_ALIGN_SIZE == 0); ceph_assert(size > 0); - bufferptr delta = buffer::create_aligned(size, CEPH_PAGE_SIZE); + bufferptr delta = buffer::create_aligned(size, EC_ALIGN_SIZE); if (data_shards[shard_id_t(0)].length() != 0 && data_shards[shard_id_t(1)] .length() != 0) { @@ -595,7 +595,7 @@ void shard_extent_map_t::pad_on_shards(const shard_extent_set_t &pad_to, } for (auto &[off, length] : pad_to.at(shard)) { bufferlist bl; - bl.push_back(buffer::create_aligned(length, CEPH_PAGE_SIZE)); + bl.push_back(buffer::create_aligned(length, EC_ALIGN_SIZE)); insert_in_shard(shard, off, bl); } } @@ -606,7 +606,7 @@ void shard_extent_map_t::pad_on_shards(const extent_set &pad_to, for (auto &shard : shards) { for (auto &[off, length] : pad_to) { bufferlist bl; - bl.push_back(buffer::create_aligned(length, CEPH_PAGE_SIZE)); + bl.push_back(buffer::create_aligned(length, EC_ALIGN_SIZE)); insert_in_shard(shard, off, bl); } } @@ -719,7 +719,7 @@ int shard_extent_map_t::_decode(const ErasureCodeInterfaceRef &ec_impl, } if (rebuild_req) { - pad_and_rebuild_to_page_align(); + pad_and_rebuild_to_ec_align(); return _decode(ec_impl, want_set, need_set); } @@ -728,7 +728,7 @@ int shard_extent_map_t::_decode(const ErasureCodeInterfaceRef &ec_impl, return 0; } -void shard_extent_map_t::pad_and_rebuild_to_page_align() { +void shard_extent_map_t::pad_and_rebuild_to_ec_align() { bool resized = false; for (auto &&[shard, emap] : extent_maps) { extent_map aligned; @@ -742,21 +742,21 @@ void shard_extent_map_t::pad_and_rebuild_to_page_align() { uint64_t start = i.get_off(); uint64_t end = start + i.get_len(); - if ((start & ~CEPH_PAGE_MASK) != 0) { - 
bl.prepend_zero(start - (start & CEPH_PAGE_MASK)); - start = start & CEPH_PAGE_MASK; + if ((start & ~EC_ALIGN_MASK) != 0) { + bl.prepend_zero(start - (start & EC_ALIGN_MASK)); + start = start & EC_ALIGN_MASK; resized_i = true; } - if ((end & ~CEPH_PAGE_MASK) != 0) { - bl.append_zero((end & CEPH_PAGE_MASK) + CEPH_PAGE_SIZE - end); - end = (end & CEPH_PAGE_MASK) + CEPH_PAGE_SIZE; + if ((end & ~EC_ALIGN_MASK) != 0) { + bl.append_zero((end & EC_ALIGN_MASK) + EC_ALIGN_SIZE - end); + end = (end & EC_ALIGN_MASK) + EC_ALIGN_SIZE; resized_i = true; } // Perhaps we can get away without page aligning here and only SIMD // align. However, typical workloads are actually page aligned already, // so this should not cause problems on any sensible workload. - if (bl.rebuild_aligned_size_and_memory(bl.length(), CEPH_PAGE_SIZE) || + if (bl.rebuild_aligned_size_and_memory(bl.length(), EC_ALIGN_SIZE) || resized_i) { // We are not permitted to modify the emap while iterating. aligned.insert(start, end - start, bl); diff --git a/src/osd/ECUtil.h b/src/osd/ECUtil.h index 48cb70db37c85..63e6536077ab3 100644 --- a/src/osd/ECUtil.h +++ b/src/osd/ECUtil.h @@ -28,6 +28,10 @@ #include "osd_types.h" +// Must be a power of 2. +static inline constexpr uint64_t EC_ALIGN_SIZE = 4096; +static inline constexpr uint64_t EC_ALIGN_MASK = ~(EC_ALIGN_SIZE - 1); + /// If someone wants these types, but not ExtentCache, move to another file struct bl_split_merge { ceph::buffer::list split( @@ -190,14 +194,14 @@ public: bool is_page_aligned() const { for (auto &&[_, ptr] : in) { uintptr_t p = (uintptr_t)ptr.c_str(); - if (p & ~CEPH_PAGE_MASK) return false; - if ((p + ptr.length()) & ~CEPH_PAGE_MASK) return false; + if (p & ~EC_ALIGN_MASK) return false; + if ((p + ptr.length()) & ~EC_ALIGN_MASK) return false; } for (auto &&[_, ptr] : out) { uintptr_t p = (uintptr_t)ptr.c_str(); - if (p & ~CEPH_PAGE_MASK) return false; - if ((p + ptr.length()) & ~CEPH_PAGE_MASK) return false; + if (p & ~EC_ALIGN_MASK) return false; + if ((p + ptr.length()) & ~EC_ALIGN_MASK) return false; } return true; @@ -328,17 +332,12 @@ struct shard_extent_set_t { } }; -inline uint64_t page_mask() { - static const uint64_t page_mask = ((uint64_t)CEPH_PAGE_SIZE) - 1; - return page_mask; -} - -inline uint64_t align_page_next(uint64_t val) { - return p2roundup(val, (uint64_t)CEPH_PAGE_SIZE); +inline uint64_t align_next(uint64_t val) { + return p2roundup(val, EC_ALIGN_SIZE); } -inline uint64_t align_page_prev(uint64_t val) { - return p2align(val, (uint64_t)CEPH_PAGE_SIZE); +inline uint64_t align_prev(uint64_t val) { + return p2align(val, EC_ALIGN_SIZE); } class stripe_info_t { @@ -509,7 +508,7 @@ public: } shard_size += remainder; } - return ECUtil::align_page_next(shard_size); + return align_next(shard_size); } uint64_t ro_offset_to_shard_offset(uint64_t ro_offset, @@ -963,7 +962,7 @@ public: bool contains(shard_id_t shard) const; bool contains(std::optional const &other) const; bool contains(shard_extent_set_t const &other) const; - void pad_and_rebuild_to_page_align(); + void pad_and_rebuild_to_ec_align(); uint64_t size(); void clear(); uint64_t get_start_offset() const { return start_offset; } @@ -1066,3 +1065,4 @@ const std::string &get_hinfo_key(); WRITE_CLASS_ENCODER(ECUtil::HashInfo) } + diff --git a/src/test/osd/TestECBackend.cc b/src/test/osd/TestECBackend.cc index ec875abdb9e69..a77b7ed239b3f 100644 --- a/src/test/osd/TestECBackend.cc +++ b/src/test/osd/TestECBackend.cc @@ -765,8 +765,8 @@ TEST(ECCommon, get_min_want_to_read_shards) } TEST(ECCommon, 
get_min_avail_to_read_shards) { - const uint64_t page_size = CEPH_PAGE_SIZE; - const uint64_t swidth = 64*page_size; + const uint64_t align_size = EC_ALIGN_SIZE; + const uint64_t swidth = 64*align_size; const unsigned int k = 4; const unsigned int m = 2; const int nshards = 6; @@ -807,7 +807,7 @@ TEST(ECCommon, get_min_avail_to_read_shards) { hobject_t hoid; for (shard_id_t i; idefault_sub_chunk; - ref_shard_read.extents.insert(i*2*page_size, page_size); + ref_shard_read.extents.insert(i*2*align_size, align_size); ref_shard_read.pg_shard = pg_shard_t(i, shard_id_t(i)); } @@ -876,7 +876,7 @@ TEST(ECCommon, get_min_avail_to_read_shards) { hobject_t hoid; for (shard_id_t i; i(0)); + ECUtil::stripe_info_t sinfo(2, 1, 2 * EC_ALIGN_SIZE, &pool, std::vector(0)); object_info_t oi; - oi.size = 25*4096; + oi.size = 25*EC_ALIGN_SIZE; shard_id_set shards; shards.insert_range(shard_id_t(), 3); ECTransaction::WritePlanObj plan( @@ -303,7 +303,7 @@ TEST(ectransaction, test_append_not_page_aligned_with_large_hole) shards, shards, false, - 4096, + EC_ALIGN_SIZE, oi, std::nullopt, ECUtil::HashInfoRef(new ECUtil::HashInfo(1)), @@ -317,8 +317,8 @@ TEST(ectransaction, test_append_not_page_aligned_with_large_hole) // Writes should grow to 4k ECUtil::shard_extent_set_t ref_write(sinfo.get_k_plus_m()); - ref_write[shard_id_t(0)].insert(12*4096, 4096); - ref_write[shard_id_t(2)].insert(12*4096, 4096); + ref_write[shard_id_t(0)].insert(12*EC_ALIGN_SIZE, EC_ALIGN_SIZE); + ref_write[shard_id_t(2)].insert(12*EC_ALIGN_SIZE, EC_ALIGN_SIZE); ASSERT_EQ(ref_write, plan.will_write); } @@ -329,14 +329,14 @@ TEST(ectransaction, test_overwrite_with_missing) bufferlist a; // We have a 4k write quite a way after the current limit of a 4k object - a.append_zero(14*1024); + a.append_zero(14 * (EC_ALIGN_SIZE / 4)); op.buffer_updates.insert(0, a.length(), PGTransaction::ObjectOperation::BufferUpdate::Write{a, 0}); pg_pool_t pool; pool.set_flag(pg_pool_t::FLAG_EC_OPTIMIZATIONS); - ECUtil::stripe_info_t sinfo(2, 1, 8192, &pool, std::vector(0)); + ECUtil::stripe_info_t sinfo(2, 1, 2 * EC_ALIGN_SIZE, &pool, std::vector(0)); object_info_t oi; - oi.size = 42*1024; + oi.size = 42*(EC_ALIGN_SIZE / 4); shard_id_set shards; shards.insert(shard_id_t(0)); shards.insert(shard_id_t(1)); @@ -348,7 +348,7 @@ TEST(ectransaction, test_overwrite_with_missing) shards, shards, false, - 42*1024, + 42*(EC_ALIGN_SIZE / 4), oi, std::nullopt, ECUtil::HashInfoRef(new ECUtil::HashInfo(1)), @@ -360,13 +360,13 @@ TEST(ectransaction, test_overwrite_with_missing) // No reads (because not yet written) ASSERT_TRUE(plan.to_read); ECUtil::shard_extent_set_t ref_read(sinfo.get_k_plus_m()); - ref_read[shard_id_t(1)].insert(4096, 4096); + ref_read[shard_id_t(1)].insert(EC_ALIGN_SIZE, EC_ALIGN_SIZE); ASSERT_EQ(ref_read, plan.to_read); // Writes should grow to 4k ECUtil::shard_extent_set_t ref_write(sinfo.get_k_plus_m()); - ref_write[shard_id_t(0)].insert(0, 8192); - ref_write[shard_id_t(1)].insert(0, 8192); + ref_write[shard_id_t(0)].insert(0, 2 * EC_ALIGN_SIZE); + ref_write[shard_id_t(1)].insert(0, 2 * EC_ALIGN_SIZE); ASSERT_EQ(ref_write, plan.will_write); } diff --git a/src/test/osd/test_extent_cache.cc b/src/test/osd/test_extent_cache.cc index 12ded85f4082e..4e9695aff7df7 100644 --- a/src/test/osd/test_extent_cache.cc +++ b/src/test/osd/test_extent_cache.cc @@ -623,22 +623,22 @@ TEST(ECExtentCache, test_invalidate_lru) /* Populate the cache LRU and then invalidate the cache. 
*/ { uint64_t bs = 3767; - auto io1 = iset_from_vector({{{align_page_prev(35*bs), align_page_next(36*bs) - align_page_prev(35*bs)}}}, cl.get_stripe_info()); + auto io1 = iset_from_vector({{{align_prev(35*bs), align_next(36*bs) - align_prev(35*bs)}}}, cl.get_stripe_info()); io1[shard_id_t(k)].insert(io1.get_extent_superset()); io1[shard_id_t(k+1)].insert(io1.get_extent_superset()); - auto io2 = iset_from_vector({{{align_page_prev(18*bs), align_page_next(19*bs) - align_page_prev(18*bs)}}}, cl.get_stripe_info()); + auto io2 = iset_from_vector({{{align_prev(18*bs), align_next(19*bs) - align_prev(18*bs)}}}, cl.get_stripe_info()); io2[shard_id_t(k)].insert(io1.get_extent_superset()); io2[shard_id_t(k+1)].insert(io1.get_extent_superset()); // io 3 is the truncate auto io3 = shard_extent_set_t(cl.sinfo.get_k_plus_m()); - auto io4 = iset_from_vector({{{align_page_prev(30*bs), align_page_next(31*bs) - align_page_prev(30*bs)}}}, cl.get_stripe_info()); + auto io4 = iset_from_vector({{{align_prev(30*bs), align_next(31*bs) - align_prev(30*bs)}}}, cl.get_stripe_info()); io3[shard_id_t(k)].insert(io1.get_extent_superset()); io3[shard_id_t(k+1)].insert(io1.get_extent_superset()); - auto io5 = iset_from_vector({{{align_page_prev(18*bs), align_page_next(19*bs) - align_page_prev(18*bs)}}}, cl.get_stripe_info()); + auto io5 = iset_from_vector({{{align_prev(18*bs), align_next(19*bs) - align_prev(18*bs)}}}, cl.get_stripe_info()); io4[shard_id_t(k)].insert(io1.get_extent_superset()); io4[shard_id_t(k+1)].insert(io1.get_extent_superset()); - optional op1 = cl.cache.prepare(cl.oid, nullopt, io1, 0, align_page_next(36*bs), false, + optional op1 = cl.cache.prepare(cl.oid, nullopt, io1, 0, align_next(36*bs), false, [&cl](ECExtentCache::OpRef &op) { cl.cache_ready(op->get_hoid(), op->get_result()); @@ -649,7 +649,7 @@ TEST(ECExtentCache, test_invalidate_lru) cl.complete_write(*op1); op1.reset(); - optional op2 = cl.cache.prepare(cl.oid, io2, io2, align_page_next(36*bs), align_page_next(36*bs), false, + optional op2 = cl.cache.prepare(cl.oid, io2, io2, align_next(36*bs), align_next(36*bs), false, [&cl](ECExtentCache::OpRef &op) { cl.cache_ready(op->get_hoid(), op->get_result()); @@ -662,7 +662,7 @@ TEST(ECExtentCache, test_invalidate_lru) cl.complete_write(*op2); op2.reset(); - optional op3 = cl.cache.prepare(cl.oid, nullopt, io3, align_page_next(36*bs), 0, false, + optional op3 = cl.cache.prepare(cl.oid, nullopt, io3, align_next(36*bs), 0, false, [&cl](ECExtentCache::OpRef &op) { cl.cache_ready(op->get_hoid(), op->get_result()); @@ -672,7 +672,7 @@ TEST(ECExtentCache, test_invalidate_lru) cl.complete_write(*op3); op3.reset(); - optional op4 = cl.cache.prepare(cl.oid, nullopt, io4, 0, align_page_next(30*bs), false, + optional op4 = cl.cache.prepare(cl.oid, nullopt, io4, 0, align_next(30*bs), false, [&cl](ECExtentCache::OpRef &op) { cl.cache_ready(op->get_hoid(), op->get_result()); @@ -682,7 +682,7 @@ TEST(ECExtentCache, test_invalidate_lru) cl.complete_write(*op4); op4.reset(); - optional op5 = cl.cache.prepare(cl.oid, io5, io5, align_page_next(30*bs), align_page_next(30*bs), false, + optional op5 = cl.cache.prepare(cl.oid, io5, io5, align_next(30*bs), align_next(30*bs), false, [&cl](ECExtentCache::OpRef &op) { cl.cache_ready(op->get_hoid(), op->get_result()); -- 2.39.5
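
Editor's note (illustrative, not part of the patch): the renamed helpers
ECUtil::align_prev() and ECUtil::align_next() reduce to power-of-two rounding
against the new EC_ALIGN_SIZE constant from ECUtil.h. Below is a minimal
standalone sketch of the same arithmetic, assuming the 4096-byte default shown
in the diff; the helper definitions here are local to the sketch, not the Ceph
implementation.

// Sketch of the EC alignment arithmetic. EC_ALIGN_SIZE must be a power of
// two; 4096 matches the default added to ECUtil.h in this patch.
#include <cassert>
#include <cstdint>

static constexpr uint64_t EC_ALIGN_SIZE = 4096;
static constexpr uint64_t EC_ALIGN_MASK = ~(EC_ALIGN_SIZE - 1);

// Equivalent to p2align(val, EC_ALIGN_SIZE): round down to the EC alignment.
static uint64_t align_prev(uint64_t val) { return val & EC_ALIGN_MASK; }

// Equivalent to p2roundup(val, EC_ALIGN_SIZE): round up to the EC alignment.
static uint64_t align_next(uint64_t val) {
  return (val + EC_ALIGN_SIZE - 1) & EC_ALIGN_MASK;
}

int main() {
  assert(align_prev(5000) == 4096);  // partial block rounds down
  assert(align_next(5000) == 8192);  // partial block rounds up
  assert(align_next(8192) == 8192);  // already-aligned offsets are unchanged
  return 0;
}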