// 16 + p2align(10, 16) -> 16
// 16 + p2align(16, 16) -> 32 including 16 bytes for padding.
ceph::bufferptr out_tmp{static_cast<unsigned>(
- AES_BLOCK_LEN + p2align(in.length(), AES_BLOCK_LEN))};
+ AES_BLOCK_LEN + p2align<std::size_t>(in.length(), AES_BLOCK_LEN))};
// let's pad the data
std::uint8_t pad_len = out_tmp.length() - in.length();
if (out.buf == nullptr) {
// 16 + p2align(10, 16) -> 16
// 16 + p2align(16, 16) -> 32
- const std::size_t needed = \
- AES_BLOCK_LEN + p2align(in.length, AES_BLOCK_LEN);
- return needed;
+ return AES_BLOCK_LEN + p2align<std::size_t>(in.length, AES_BLOCK_LEN);
}
// how many bytes of in.buf hang outside the alignment boundary and how
* eg, p2align(0x1234, 0x100) == 0x1200 (0x12*align)
* eg, p2align(0x5600, 0x100) == 0x5600 (0x56*align)
*/
// Round x down to the nearest multiple of align.
// Requires: align is a power of two (the x & -align trick is only valid then)
// and x, align are integral values of the same type T.
template<typename T>
constexpr inline T p2align(T x, T align) {
  // -align has all bits set above log2(align), so the AND clears the
  // low-order (sub-alignment) bits of x.
  return x & -align;
}
* eg, p2phase(0x1234, 0x100) == 0x34 (x-0x12*align)
* eg, p2phase(0x5600, 0x100) == 0x00 (x-0x56*align)
*/
// Return the remainder of x modulo align, i.e. how far x sits past the
// previous alignment boundary.
// Requires: align is a power of two; x, align are integral values of type T.
template<typename T>
constexpr inline T p2phase(T x, T align) {
  // align - 1 is a mask of the sub-alignment bits.
  return x & (align - 1);
}
* eg, p2nphase(0x1234, 0x100) == 0xcc (0x13*align-x)
* eg, p2nphase(0x5600, 0x100) == 0x00 (0x56*align-x)
*/
// Return the distance from x up to the next alignment boundary
// (0 if x is already aligned) — the complement of p2phase.
// Requires: align is a power of two; x, align are integral values of type T.
template<typename T>
constexpr inline T p2nphase(T x, T align) {
  // -x & (align - 1) == (align - p2phase(x, align)) % align.
  return -x & (align - 1);
}
* eg, p2roundup(0x1234, 0x100) == 0x1300 (0x13*align)
* eg, p2roundup(0x5600, 0x100) == 0x5600 (0x56*align)
*/
// Round x up to the nearest multiple of align (x itself if already aligned).
// Requires: align is a power of two; x, align are integral values of type T.
// Note: callers must ensure the rounded-up value does not overflow T.
template<typename T>
constexpr inline T p2roundup(T x, T align) {
  // -(-x & -align): negate, round the magnitude down, negate back —
  // equivalent to (x + align - 1) & -align without the intermediate overflow.
  return -(-x & -align);
}
// count trailing zeros.
// is a special case for filestore
bool prune_required = false;
auto object_size = this->m_image_ctx.layout.object_size;
- auto discard_granularity_bytes = std::min<uint64_t>(
- m_discard_granularity_bytes, object_size);
+ auto discard_granularity_bytes = std::min(m_discard_granularity_bytes,
+ object_size);
auto xform_lambda =
[discard_granularity_bytes, object_size, &prune_required]
(ObjectExtent& object_extent) {
if ((discard_granularity_bytes < object_size) ||
(next_offset < object_size)) {
- static_assert(sizeof(offset) == sizeof(discard_granularity_bytes));
- offset = p2roundup(offset, discard_granularity_bytes);
- next_offset = p2align(next_offset, discard_granularity_bytes);
+ offset = p2roundup<uint64_t>(offset, discard_granularity_bytes);
+ next_offset = p2align<uint64_t>(next_offset, discard_granularity_bytes);
if (offset >= next_offset) {
prune_required = true;
length = 0;
static uint32_t segment_onwire_size(const uint32_t logical_size)
{
- return p2roundup(logical_size, CRYPTO_BLOCK_SIZE);
+ return p2roundup<uint32_t>(logical_size, CRYPTO_BLOCK_SIZE);
}
static ceph::bufferlist segment_onwire_bufferlist(ceph::bufferlist&& bl)
auto max_bsize = std::max(wctx->target_blob_size, min_alloc_size);
auto min_off = offset >= max_bsize ? offset - max_bsize : 0;
uint32_t alloc_len = min_alloc_size;
- auto offset0 = p2align(offset, alloc_len);
+ auto offset0 = p2align<uint64_t>(offset, alloc_len);
bool any_change;
// new blob.
BlobRef b = c->new_blob();
- uint64_t b_off = p2phase(offset, alloc_len);
+ uint64_t b_off = p2phase<uint64_t>(offset, alloc_len);
uint64_t b_off0 = b_off;
_pad_zeros(&bl, &b_off0, block_size);
o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
uint64_t intervals = 0;
{
std::lock_guard l(lock);
- max_intervals = p2roundup(num_free, alloc_unit) / alloc_unit;
+ max_intervals = p2roundup<uint64_t>(num_free, alloc_unit) / alloc_unit;
for (unsigned bin = 0; bin < free.size(); ++bin) {
intervals += free[bin].num_intervals();
}
auto delta_off = res.offset - offset;
if (len > delta_off) {
res.length = len - delta_off;
- res.length = p2align(res.length, min_length);
+ res.length = p2align<uint32_t>(res.length, min_length);
if (res.length) {
return res;
}
(ctx->min_affordable_len == 0 ||
(longest.length < ctx->min_affordable_len))) {
- ctx->min_affordable_len = p2align(longest.length, min_length);
+ ctx->min_affordable_len = p2align<uint32_t>(longest.length, min_length);
ctx->min_affordable_offs = longest.offset;
}
if (mode == STOP_ON_PARTIAL) {
int64_t pos = l0_pos_start;
slot_t bits = (slot_t)1 << (l0_pos_start % d0);
- while (pos < std::min(l0_pos_end, (int64_t)p2roundup(l0_pos_start, d0))) {
+ while (pos < std::min(l0_pos_end, p2roundup<int64_t>(l0_pos_start, d0))) {
l0[pos / d0] &= ~bits;
bits <<= 1;
pos++;
}
- while (pos < std::min(l0_pos_end, (int64_t)p2align(l0_pos_end, d0))) {
+ while (pos < std::min(l0_pos_end, p2align<int64_t>(l0_pos_end, d0))) {
l0[pos / d0] = all_slot_clear;
pos += d0;
}
auto pos = l0_pos_start;
slot_t bits = (slot_t)1 << (l0_pos_start % d0);
slot_t& val_s = l0[pos / d0];
- int64_t pos_e = std::min(l0_pos_end, (int64_t)p2roundup(l0_pos_start + 1, d0));
+ int64_t pos_e = std::min(l0_pos_end,
+ p2roundup<int64_t>(l0_pos_start + 1, d0));
while (pos < pos_e) {
val_s |= bits;
bits <<= 1;
pos++;
}
- pos_e = std::min(l0_pos_end, (int64_t)p2align(l0_pos_end, d0));
+ pos_e = std::min(l0_pos_end, p2align<int64_t>(l0_pos_end, d0));
auto idx = pos / d0;
while (pos < pos_e) {
l0[idx++] = all_slot_set;