}
}
- uint64_t aligned_size = ECUtil::align_page_next(op.obc->obs.oi.size);
+ uint64_t aligned_size = ECUtil::align_next(op.obc->obs.oi.size);
int r = op.returned_data->decode(ec_impl, shard_want_to_read, aligned_size);
ceph_assert(r == 0);
extents.union_of(read_request.shard_want_to_read.at(shard));
}
- extents.align(CEPH_PAGE_SIZE);
+ extents.align(EC_ALIGN_SIZE);
if (read_mask.contains(shard)) {
shard_read.extents.intersection_of(extents, read_mask.at(shard));
}
/* Calculate any non-aligned pages. These need to be read and written */
extent_set aligned_ro_writes(unaligned_ro_writes);
- aligned_ro_writes.align(CEPH_PAGE_SIZE);
+ aligned_ro_writes.align(EC_ALIGN_SIZE);
extent_set partial_page_ro_writes(aligned_ro_writes);
partial_page_ro_writes.subtract(unaligned_ro_writes);
- partial_page_ro_writes.align(CEPH_PAGE_SIZE);
+ partial_page_ro_writes.align(EC_ALIGN_SIZE);
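+ /* Example (hypothetical values, assuming EC_ALIGN_SIZE == 4096): a client
+  * write covering ro range [1024, 3072) gives
+  *   unaligned_ro_writes    = [1024, 3072)
+  *   aligned_ro_writes      = [0, 4096)
+  *   partial_page_ro_writes = [0, 1024) + [3072, 4096), re-aligned to [0, 4096)
+  * i.e. the single partially-covered page that must be read and rewritten. */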
extent_set write_superset;
for (auto &&[off, len] : unaligned_ro_writes) {
sinfo.ro_range_to_shard_extent_set_with_superset(
off, len, will_write, write_superset);
}
- write_superset.align(CEPH_PAGE_SIZE);
+ write_superset.align(EC_ALIGN_SIZE);
shard_id_set writable_parity_shards = shard_id_set::intersection(sinfo.get_parity_shards(), writable_shards);
if (write_superset.size() > 0) {
reads.intersection_of(read_mask);
do_parity_delta_write = false;
} else {
- will_write.align(CEPH_PAGE_SIZE);
+ will_write.align(EC_ALIGN_SIZE);
ECUtil::shard_extent_set_t pdw_reads(will_write);
- sinfo.ro_size_to_read_mask(ECUtil::align_page_next(orig_size), read_mask);
+ sinfo.ro_size_to_read_mask(ECUtil::align_next(orig_size), read_mask);
/* Next we need to add the reads required for a conventional write */
for (auto shard : sinfo.get_data_shards()) {
}
if (entry && plan.orig_size < plan.projected_size) {
- entry->mod_desc.append(ECUtil::align_page_next(plan.orig_size));
+ entry->mod_desc.append(ECUtil::align_next(plan.orig_size));
}
if (op.is_delete()) {
auto &t = transactions.at(shard);
uint64_t start = eset.range_start();
- uint64_t start_align_prev = ECUtil::align_page_prev(start);
- uint64_t start_align_next = ECUtil::align_page_next(start);
+ uint64_t start_align_prev = ECUtil::align_prev(start);
+ uint64_t start_align_next = ECUtil::align_next(start);
uint64_t end = eset.range_end();
t.touch(
coll_t(spg_t(pgid, shard)),
void ECTransaction::Generate::appends_and_clone_ranges() {
extent_set clone_ranges = plan.will_write.get_extent_superset();
- uint64_t clone_max = ECUtil::align_page_next(plan.orig_size);
+ uint64_t clone_max = ECUtil::align_next(plan.orig_size);
if (op.delete_first) {
clone_max = 0;
} else if (op.truncate && op.truncate->first < clone_max) {
- clone_max = ECUtil::align_page_next(op.truncate->first);
+ clone_max = ECUtil::align_next(op.truncate->first);
}
ECUtil::shard_extent_set_t cloneable_range(sinfo.get_k_plus_m());
sinfo.ro_size_to_read_mask(clone_max, cloneable_range);
entry->mod_desc.rollback_extents(
entry->version.version,
rollback_extents,
- ECUtil::align_page_next(plan.orig_size),
+ ECUtil::align_next(plan.orig_size),
rollback_shards);
}
if (entry->written_shards.size() == sinfo.get_k_plus_m()) {
ro_offset, raw_shard_id_t(0));
for (auto &&iter = shard_extent_set.begin(); iter != shard_extent_set.end()
;) {
- iter->second.erase_after(align_page_next(shard_offset));
+ iter->second.erase_after(align_next(shard_offset));
if (iter->second.empty()) iter = shard_extent_set.erase(iter);
else ++iter;
}
void ECUtil::stripe_info_t::ro_size_to_read_mask(
uint64_t ro_size,
shard_extent_set_t &shard_extent_set) const {
- ro_range_to_shard_extent_set_with_parity(0, align_page_next(ro_size),
+ ro_range_to_shard_extent_set_with_parity(0, align_next(ro_size),
shard_extent_set);
}
uint64_t ro_size,
shard_extent_set_t &shard_extent_set) const {
// There should never be any zero padding on the parity.
- ro_range_to_shard_extent_set(align_page_next(ro_size),
+ ro_range_to_shard_extent_set(align_next(ro_size),
ro_offset_to_next_stripe_ro_offset(ro_size) -
- align_page_next(ro_size),
+ align_next(ro_size),
shard_extent_set);
trim_shard_extent_set_for_ro_offset(ro_size, shard_extent_set);
}
continue;
}
bufferlist bl;
- bl.push_back(buffer::create_aligned(length, CEPH_PAGE_SIZE));
+ bl.push_back(buffer::create_aligned(length, EC_ALIGN_SIZE));
extent_maps[shard].insert(offset, length, bl);
}
}
}
if (rebuild_req) {
- pad_and_rebuild_to_page_align();
+ pad_and_rebuild_to_ec_align();
return _encode(ec_impl);
}
shard_extent_map_t &old_sem) {
shard_id_set out_set = sinfo->get_parity_shards();
- pad_and_rebuild_to_page_align();
- old_sem.pad_and_rebuild_to_page_align();
+ pad_and_rebuild_to_ec_align();
+ old_sem.pad_and_rebuild_to_ec_align();
for (auto data_shard : sinfo->get_data_shards()) {
shard_extent_map_t s(sinfo);
shard_id_map<bufferptr> &parity_shards = iter.get_out_bufferptrs();
unsigned int size = iter.get_length();
- ceph_assert(size % 4096 == 0);
+ ceph_assert(size % EC_ALIGN_SIZE == 0);
ceph_assert(size > 0);
- bufferptr delta = buffer::create_aligned(size, CEPH_PAGE_SIZE);
+ bufferptr delta = buffer::create_aligned(size, EC_ALIGN_SIZE);
if (data_shards[shard_id_t(0)].length() != 0 && data_shards[shard_id_t(1)]
.length() != 0) {
}
for (auto &[off, length] : pad_to.at(shard)) {
bufferlist bl;
- bl.push_back(buffer::create_aligned(length, CEPH_PAGE_SIZE));
+ bl.push_back(buffer::create_aligned(length, EC_ALIGN_SIZE));
insert_in_shard(shard, off, bl);
}
}
for (auto &shard : shards) {
for (auto &[off, length] : pad_to) {
bufferlist bl;
- bl.push_back(buffer::create_aligned(length, CEPH_PAGE_SIZE));
+ bl.push_back(buffer::create_aligned(length, EC_ALIGN_SIZE));
insert_in_shard(shard, off, bl);
}
}
}
if (rebuild_req) {
- pad_and_rebuild_to_page_align();
+ pad_and_rebuild_to_ec_align();
return _decode(ec_impl, want_set, need_set);
}
return 0;
}
-void shard_extent_map_t::pad_and_rebuild_to_page_align() {
+void shard_extent_map_t::pad_and_rebuild_to_ec_align() {
bool resized = false;
for (auto &&[shard, emap] : extent_maps) {
extent_map aligned;
uint64_t start = i.get_off();
uint64_t end = start + i.get_len();
- if ((start & ~CEPH_PAGE_MASK) != 0) {
- bl.prepend_zero(start - (start & CEPH_PAGE_MASK));
- start = start & CEPH_PAGE_MASK;
+ if ((start & ~EC_ALIGN_MASK) != 0) {
+ bl.prepend_zero(start - (start & EC_ALIGN_MASK));
+ start = start & EC_ALIGN_MASK;
resized_i = true;
}
- if ((end & ~CEPH_PAGE_MASK) != 0) {
- bl.append_zero((end & CEPH_PAGE_MASK) + CEPH_PAGE_SIZE - end);
- end = (end & CEPH_PAGE_MASK) + CEPH_PAGE_SIZE;
+ if ((end & ~EC_ALIGN_MASK) != 0) {
+ bl.append_zero((end & EC_ALIGN_MASK) + EC_ALIGN_SIZE - end);
+ end = (end & EC_ALIGN_MASK) + EC_ALIGN_SIZE;
resized_i = true;
}
// Perhaps we can get away without page aligning here and only SIMD
// align. However, typical workloads are actually page aligned already,
// so this should not cause problems on any sensible workload.
- if (bl.rebuild_aligned_size_and_memory(bl.length(), CEPH_PAGE_SIZE) ||
+ if (bl.rebuild_aligned_size_and_memory(bl.length(), EC_ALIGN_SIZE) ||
resized_i) {
// We are not permitted to modify the emap while iterating.
aligned.insert(start, end - start, bl);
#include "osd_types.h"
+// Must be a power of 2.
+static inline constexpr uint64_t EC_ALIGN_SIZE = 4096;
+static inline constexpr uint64_t EC_ALIGN_MASK = ~(EC_ALIGN_SIZE - 1);
+
/// If someone wants these types, but not ExtentCache, move to another file
struct bl_split_merge {
ceph::buffer::list split(
bool is_page_aligned() const {
for (auto &&[_, ptr] : in) {
uintptr_t p = (uintptr_t)ptr.c_str();
- if (p & ~CEPH_PAGE_MASK) return false;
- if ((p + ptr.length()) & ~CEPH_PAGE_MASK) return false;
+ if (p & ~EC_ALIGN_MASK) return false;
+ if ((p + ptr.length()) & ~EC_ALIGN_MASK) return false;
}
for (auto &&[_, ptr] : out) {
uintptr_t p = (uintptr_t)ptr.c_str();
- if (p & ~CEPH_PAGE_MASK) return false;
- if ((p + ptr.length()) & ~CEPH_PAGE_MASK) return false;
+ if (p & ~EC_ALIGN_MASK) return false;
+ if ((p + ptr.length()) & ~EC_ALIGN_MASK) return false;
}
return true;
}
};
-inline uint64_t page_mask() {
- static const uint64_t page_mask = ((uint64_t)CEPH_PAGE_SIZE) - 1;
- return page_mask;
-}
-
-inline uint64_t align_page_next(uint64_t val) {
- return p2roundup(val, (uint64_t)CEPH_PAGE_SIZE);
+inline uint64_t align_next(uint64_t val) {
+ return p2roundup(val, EC_ALIGN_SIZE);
}
-inline uint64_t align_page_prev(uint64_t val) {
- return p2align(val, (uint64_t)CEPH_PAGE_SIZE);
+inline uint64_t align_prev(uint64_t val) {
+ return p2align(val, EC_ALIGN_SIZE);
}
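+// Example values (illustrative, assuming EC_ALIGN_SIZE == 4096):
+//   align_prev(4097) == 4096   align_next(4097) == 8192
+//   align_prev(4096) == 4096   align_next(4096) == 4096
+//   (off & ~EC_ALIGN_MASK) != 0 detects a non-aligned offset, and
+//   (off &  EC_ALIGN_MASK) rounds it down, matching align_prev().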
class stripe_info_t {
}
shard_size += remainder;
}
- return ECUtil::align_page_next(shard_size);
+ return align_next(shard_size);
}
uint64_t ro_offset_to_shard_offset(uint64_t ro_offset,
bool contains(shard_id_t shard) const;
bool contains(std::optional<shard_extent_set_t> const &other) const;
bool contains(shard_extent_set_t const &other) const;
- void pad_and_rebuild_to_page_align();
+ void pad_and_rebuild_to_ec_align();
uint64_t size();
void clear();
uint64_t get_start_offset() const { return start_offset; }
WRITE_CLASS_ENCODER(ECUtil::HashInfo)
}
+
}
TEST(ECCommon, get_min_avail_to_read_shards) {
- const uint64_t page_size = CEPH_PAGE_SIZE;
- const uint64_t swidth = 64*page_size;
+ const uint64_t align_size = EC_ALIGN_SIZE;
+ const uint64_t swidth = 64*align_size;
const unsigned int k = 4;
const unsigned int m = 2;
const int nshards = 6;
hobject_t hoid;
for (shard_id_t i; i<k; ++i) {
- to_read_list[i].insert(int(i) * 2 * page_size, page_size);
+ to_read_list[i].insert(int(i) * 2 * align_size, align_size);
}
ECCommon::read_request_t read_request(to_read_list, false, object_size);
ECUtil::shard_extent_set_t to_read_list(s.get_k_plus_m());
hobject_t hoid;
for (shard_id_t i; i<k; ++i) {
- to_read_list[i].insert(int(i) * 2 * page_size, page_size);
+ to_read_list[i].insert(int(i) * 2 * align_size, align_size);
}
ECCommon::read_request_t read_request(to_read_list, false, object_size);
hobject_t hoid;
for (shard_id_t i; i < (int)k; ++i) {
- to_read_list[i].insert(int(i) * 2 * page_size + int(i) + 1, int(i) + 1);
+ to_read_list[i].insert(int(i) * 2 * align_size + int(i) + 1, int(i) + 1);
}
ECCommon::read_request_t ref(to_read_list, false, object_size);
ECCommon::read_request_t read_request(to_read_list, false, object_size);
shard_id_t shard_id(i);
ECCommon::shard_read_t &ref_shard_read = ref.shard_reads[shard_id];
ref_shard_read.subchunk = ecode->default_sub_chunk;
- ref_shard_read.extents.insert(i*2*page_size, page_size);
+ ref_shard_read.extents.insert(i*2*align_size, align_size);
ref_shard_read.pg_shard = pg_shard_t(i, shard_id_t(i));
}
hobject_t hoid;
for (shard_id_t i; i<k; ++i) {
- to_read_list[i].insert(int(i) * 2 * page_size, page_size);
+ to_read_list[i].insert(int(i) * 2 * align_size, align_size);
}
ECCommon::read_request_t read_request(to_read_list, false, object_size);
hobject_t hoid;
unsigned int missing_shard = 1;
- to_read_list[shard_id_t(0)].insert(0, page_size);
- to_read_list[shard_id_t(1)].insert(page_size, page_size);
- to_read_list[shard_id_t(2)].insert(2*page_size, page_size);
- to_read_list[shard_id_t(3)].insert(3*page_size, page_size);
+ to_read_list[shard_id_t(0)].insert(0, align_size);
+ to_read_list[shard_id_t(1)].insert(align_size, align_size);
+ to_read_list[shard_id_t(2)].insert(2*align_size, align_size);
+ to_read_list[shard_id_t(3)].insert(3*align_size, align_size);
ECCommon::read_request_t read_request(to_read_list, false, object_size);
ECCommon::read_request_t ref(to_read_list, false, object_size);
// Populating reference manually to check that adjacent shards get correctly combined.
- ref.shard_reads[shard_id_t(0)].extents.insert(0, page_size*2);
- ref.shard_reads[shard_id_t(2)].extents.insert(page_size, page_size*2);
- ref.shard_reads[shard_id_t(3)].extents.insert(page_size, page_size);
- ref.shard_reads[shard_id_t(3)].extents.insert(3*page_size, page_size);
- ref.shard_reads[shard_id_t(4)].extents.insert(page_size, page_size);
+ ref.shard_reads[shard_id_t(0)].extents.insert(0, align_size*2);
+ ref.shard_reads[shard_id_t(2)].extents.insert(align_size, align_size*2);
+ ref.shard_reads[shard_id_t(3)].extents.insert(align_size, align_size);
+ ref.shard_reads[shard_id_t(3)].extents.insert(3*align_size, align_size);
+ ref.shard_reads[shard_id_t(4)].extents.insert(align_size, align_size);
ref.shard_reads[shard_id_t(0)].pg_shard = pg_shard_t(0, shard_id_t(0));
ref.shard_reads[shard_id_t(2)].pg_shard = pg_shard_t(2, shard_id_t(2));
ref.shard_reads[shard_id_t(3)].pg_shard = pg_shard_t(3, shard_id_t(3));
extent_set extents_to_read;
for (shard_id_t i; i<k; ++i) {
- to_read_list[i].insert(int(i) * 2 * page_size, page_size);
- extents_to_read.insert(int(i) * 2 * page_size, page_size);
+ to_read_list[i].insert(int(i) * 2 * align_size, align_size);
+ extents_to_read.insert(int(i) * 2 * align_size, align_size);
}
ECCommon::read_request_t read_request(to_read_list, false, object_size);
hobject_t hoid;
for (shard_id_t i; i<k; ++i) {
- to_read_list[i].insert(int(i) * 2 * page_size, page_size);
+ to_read_list[i].insert(int(i) * 2 * align_size, align_size);
}
ECCommon::read_request_t read_request(to_read_list, false, object_size);
TEST(ECCommon, shard_read_combo_tests)
{
- const uint64_t page_size = CEPH_PAGE_SIZE;
- const uint64_t swidth = 2*page_size;
+ const uint64_t align_size = EC_ALIGN_SIZE;
+ const uint64_t swidth = 2*align_size;
const unsigned int k = 2;
const unsigned int m = 2;
const int nshards = 4;
TEST(ECCommon, get_remaining_shards)
{
- const uint64_t page_size = CEPH_PAGE_SIZE;
- const uint64_t swidth = 64*page_size;
+ const uint64_t align_size = EC_ALIGN_SIZE;
+ const uint64_t swidth = 64*align_size;
const unsigned int k = 4;
const unsigned int m = 2;
const int nshards = 6;
hobject_t hoid;
ECUtil::shard_extent_set_t to_read(s.get_k_plus_m());
- s.ro_range_to_shard_extent_set(chunk_size/2, chunk_size+page_size, to_read);
+ s.ro_range_to_shard_extent_set(chunk_size/2, chunk_size+align_size, to_read);
ECCommon::read_request_t read_request(to_read, false, object_size);
unsigned int missing_shard = 1;
}
ref.shard_reads[shard_id_t(0)].extents.insert(0, chunk_size/2);
ref.shard_reads[shard_id_t(0)].pg_shard = pg_shards[0];
- ref.shard_reads[shard_id_t(2)].extents.insert(0, chunk_size/2+page_size);
+ ref.shard_reads[shard_id_t(2)].extents.insert(0, chunk_size/2+align_size);
ref.shard_reads[shard_id_t(2)].pg_shard = pg_shards[2];
- ref.shard_reads[shard_id_t(3)].extents.insert(0, chunk_size/2+page_size);
+ ref.shard_reads[shard_id_t(3)].extents.insert(0, chunk_size/2+align_size);
ref.shard_reads[shard_id_t(3)].pg_shard = pg_shards[3];
- ref.shard_reads[shard_id_t(4)].extents.insert(0, chunk_size/2+page_size);
+ ref.shard_reads[shard_id_t(4)].extents.insert(0, chunk_size/2+align_size);
ref.shard_reads[shard_id_t(4)].pg_shard = pg_shards[4];
ASSERT_EQ(read_request, ref);
}
TEST(ECCommon, encode)
{
- const uint64_t page_size = CEPH_PAGE_SIZE;
- const uint64_t swidth = 2*page_size;
+ const uint64_t align_size = EC_ALIGN_SIZE;
+ const uint64_t swidth = 2*align_size;
const unsigned int k = 2;
const unsigned int m = 2;
TEST(ECCommon, decode)
{
- const uint64_t page_size = CEPH_PAGE_SIZE;
- const uint64_t swidth = 3*page_size;
+ const uint64_t align_size = EC_ALIGN_SIZE;
+ const uint64_t swidth = 3*align_size;
const unsigned int k = 3;
const unsigned int m = 2;
// The object is empty, so we should have no reads and a 4k write.
ASSERT_FALSE(plan.to_read);
extent_set ref_write;
- ref_write.insert(0, 4096);
+ ref_write.insert(0, EC_ALIGN_SIZE);
ASSERT_EQ(2u, plan.will_write.shard_count());
ASSERT_EQ(ref_write, plan.will_write.at(shard_id_t(0)));
ASSERT_EQ(ref_write, plan.will_write.at(shard_id_t(2)));
// There should be no overlap of this read.
ASSERT_EQ(1u, (*plan.to_read).shard_count());
extent_set ref;
- ref.insert(0, 4096);
+ ref.insert(0, EC_ALIGN_SIZE);
ASSERT_EQ(2u, plan.will_write.shard_count());
ASSERT_EQ(1u, (*plan.to_read).shard_count());
ASSERT_EQ(ref, plan.will_write.at(shard_id_t(0)));
PGTransaction::ObjectOperation op;
bufferlist a;
- // We have a 4k write quite a way after the current limit of a 4k object
- a.append_zero(2048);
- op.buffer_updates.insert(24*4096 + 1024, a.length(), PGTransaction::ObjectOperation::BufferUpdate::Write{a, 0});
+ // We have a small (EC_ALIGN_SIZE/2) write quite a way past the current end of an EC_ALIGN_SIZE-sized object
+ a.append_zero(EC_ALIGN_SIZE / 2);
+ op.buffer_updates.insert(24 * EC_ALIGN_SIZE + EC_ALIGN_SIZE / 4, a.length(), PGTransaction::ObjectOperation::BufferUpdate::Write{a, 0});
pg_pool_t pool;
pool.set_flag(pg_pool_t::FLAG_EC_OPTIMIZATIONS);
- ECUtil::stripe_info_t sinfo(2, 1, 8192, &pool, std::vector<shard_id_t>(0));
+ ECUtil::stripe_info_t sinfo(2, 1, 2 * EC_ALIGN_SIZE, &pool, std::vector<shard_id_t>(0));
object_info_t oi;
- oi.size = 25*4096;
+ oi.size = 25*EC_ALIGN_SIZE;
shard_id_set shards;
shards.insert_range(shard_id_t(), 3);
ECTransaction::WritePlanObj plan(
shards,
shards,
false,
- 4096,
+ EC_ALIGN_SIZE,
oi,
std::nullopt,
ECUtil::HashInfoRef(new ECUtil::HashInfo(1)),
// Writes should grow to 4k
ECUtil::shard_extent_set_t ref_write(sinfo.get_k_plus_m());
- ref_write[shard_id_t(0)].insert(12*4096, 4096);
- ref_write[shard_id_t(2)].insert(12*4096, 4096);
+ ref_write[shard_id_t(0)].insert(12*EC_ALIGN_SIZE, EC_ALIGN_SIZE);
+ ref_write[shard_id_t(2)].insert(12*EC_ALIGN_SIZE, EC_ALIGN_SIZE);
ASSERT_EQ(ref_write, plan.will_write);
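+ // Worked mapping (illustrative, assuming EC_ALIGN_SIZE == 4096): with k=2
+ // and a stripe width of 2*EC_ALIGN_SIZE the chunk size is EC_ALIGN_SIZE, so
+ // ro offset 24*EC_ALIGN_SIZE + EC_ALIGN_SIZE/4 lands in stripe 12, chunk 0,
+ // i.e. data shard 0 at shard offset 12*EC_ALIGN_SIZE + EC_ALIGN_SIZE/4. The
+ // EC_ALIGN_SIZE/2 write stays within that one page, so only the page
+ // [12*EC_ALIGN_SIZE, 13*EC_ALIGN_SIZE) on shard 0 and the matching parity
+ // page on shard 2 are written, which is what ref_write asserts.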
}
bufferlist a;
// We have a 14k overwrite at the start of an existing 42k object
- a.append_zero(14*1024);
+ a.append_zero(14 * (EC_ALIGN_SIZE / 4));
op.buffer_updates.insert(0, a.length(), PGTransaction::ObjectOperation::BufferUpdate::Write{a, 0});
pg_pool_t pool;
pool.set_flag(pg_pool_t::FLAG_EC_OPTIMIZATIONS);
- ECUtil::stripe_info_t sinfo(2, 1, 8192, &pool, std::vector<shard_id_t>(0));
+ ECUtil::stripe_info_t sinfo(2, 1, 2 * EC_ALIGN_SIZE, &pool, std::vector<shard_id_t>(0));
object_info_t oi;
- oi.size = 42*1024;
+ oi.size = 42*(EC_ALIGN_SIZE / 4);
shard_id_set shards;
shards.insert(shard_id_t(0));
shards.insert(shard_id_t(1));
shards,
shards,
false,
- 42*1024,
+ 42*(EC_ALIGN_SIZE / 4),
oi,
std::nullopt,
ECUtil::HashInfoRef(new ECUtil::HashInfo(1)),
// The write ends part-way through shard 1's second page, so that page must be read
ASSERT_TRUE(plan.to_read);
ECUtil::shard_extent_set_t ref_read(sinfo.get_k_plus_m());
- ref_read[shard_id_t(1)].insert(4096, 4096);
+ ref_read[shard_id_t(1)].insert(EC_ALIGN_SIZE, EC_ALIGN_SIZE);
ASSERT_EQ(ref_read, plan.to_read);
// Writes should grow to EC_ALIGN_SIZE alignment
ECUtil::shard_extent_set_t ref_write(sinfo.get_k_plus_m());
- ref_write[shard_id_t(0)].insert(0, 8192);
- ref_write[shard_id_t(1)].insert(0, 8192);
+ ref_write[shard_id_t(0)].insert(0, 2 * EC_ALIGN_SIZE);
+ ref_write[shard_id_t(1)].insert(0, 2 * EC_ALIGN_SIZE);
ASSERT_EQ(ref_write, plan.will_write);
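+ // Worked mapping (illustrative, assuming EC_ALIGN_SIZE == 4096): the write
+ // covers ro [0, 14 KiB), which maps to shard 0 [0, 8 KiB) and shard 1
+ // [0, 6 KiB). The unaligned end on shard 1 grows to the next EC_ALIGN_SIZE
+ // boundary, so both data shards write [0, 2*EC_ALIGN_SIZE), and shard 1's
+ // second page must first be read to preserve its unwritten tail.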
}
/* Populate the cache LRU and then invalidate the cache. */
{
uint64_t bs = 3767;
- auto io1 = iset_from_vector({{{align_page_prev(35*bs), align_page_next(36*bs) - align_page_prev(35*bs)}}}, cl.get_stripe_info());
+ auto io1 = iset_from_vector({{{align_prev(35*bs), align_next(36*bs) - align_prev(35*bs)}}}, cl.get_stripe_info());
io1[shard_id_t(k)].insert(io1.get_extent_superset());
io1[shard_id_t(k+1)].insert(io1.get_extent_superset());
- auto io2 = iset_from_vector({{{align_page_prev(18*bs), align_page_next(19*bs) - align_page_prev(18*bs)}}}, cl.get_stripe_info());
+ auto io2 = iset_from_vector({{{align_prev(18*bs), align_next(19*bs) - align_prev(18*bs)}}}, cl.get_stripe_info());
io2[shard_id_t(k)].insert(io1.get_extent_superset());
io2[shard_id_t(k+1)].insert(io1.get_extent_superset());
// io 3 is the truncate
auto io3 = shard_extent_set_t(cl.sinfo.get_k_plus_m());
- auto io4 = iset_from_vector({{{align_page_prev(30*bs), align_page_next(31*bs) - align_page_prev(30*bs)}}}, cl.get_stripe_info());
+ auto io4 = iset_from_vector({{{align_prev(30*bs), align_next(31*bs) - align_prev(30*bs)}}}, cl.get_stripe_info());
io3[shard_id_t(k)].insert(io1.get_extent_superset());
io3[shard_id_t(k+1)].insert(io1.get_extent_superset());
- auto io5 = iset_from_vector({{{align_page_prev(18*bs), align_page_next(19*bs) - align_page_prev(18*bs)}}}, cl.get_stripe_info());
+ auto io5 = iset_from_vector({{{align_prev(18*bs), align_next(19*bs) - align_prev(18*bs)}}}, cl.get_stripe_info());
io4[shard_id_t(k)].insert(io1.get_extent_superset());
io4[shard_id_t(k+1)].insert(io1.get_extent_superset());
- optional op1 = cl.cache.prepare(cl.oid, nullopt, io1, 0, align_page_next(36*bs), false,
+ optional op1 = cl.cache.prepare(cl.oid, nullopt, io1, 0, align_next(36*bs), false,
[&cl](ECExtentCache::OpRef &op)
{
cl.cache_ready(op->get_hoid(), op->get_result());
cl.complete_write(*op1);
op1.reset();
- optional op2 = cl.cache.prepare(cl.oid, io2, io2, align_page_next(36*bs), align_page_next(36*bs), false,
+ optional op2 = cl.cache.prepare(cl.oid, io2, io2, align_next(36*bs), align_next(36*bs), false,
[&cl](ECExtentCache::OpRef &op)
{
cl.cache_ready(op->get_hoid(), op->get_result());
cl.complete_write(*op2);
op2.reset();
- optional op3 = cl.cache.prepare(cl.oid, nullopt, io3, align_page_next(36*bs), 0, false,
+ optional op3 = cl.cache.prepare(cl.oid, nullopt, io3, align_next(36*bs), 0, false,
[&cl](ECExtentCache::OpRef &op)
{
cl.cache_ready(op->get_hoid(), op->get_result());
cl.complete_write(*op3);
op3.reset();
- optional op4 = cl.cache.prepare(cl.oid, nullopt, io4, 0, align_page_next(30*bs), false,
+ optional op4 = cl.cache.prepare(cl.oid, nullopt, io4, 0, align_next(30*bs), false,
[&cl](ECExtentCache::OpRef &op)
{
cl.cache_ready(op->get_hoid(), op->get_result());
cl.complete_write(*op4);
op4.reset();
- optional op5 = cl.cache.prepare(cl.oid, io5, io5, align_page_next(30*bs), align_page_next(30*bs), false,
+ optional op5 = cl.cache.prepare(cl.oid, io5, io5, align_next(30*bs), align_next(30*bs), false,
[&cl](ECExtentCache::OpRef &op)
{
cl.cache_ready(op->get_hoid(), op->get_result());