x and align must be of the same width. Otherwise the lack of
sign-extension can bite, e.g.:
uint64_t x = 128000;
uint32_t align = 32768;
uint64_t res = p2roundup(x, align);
// res = 18446744069414715392
While the templates added in commit c06b97b3d7e3
("include: Add templates along side macros in intarith") are
technically correct,
P2*() macros weren't meant to be used with different types. The
Solaris kernel (which is where they originated) has P2*_TYPED()
variants for that.
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
(cherry picked from commit d0293dd2d8c0327c9d4d6b826a966dd7b18ea318)
Conflicts:
src/librbd/io/ImageRequest.cc
src/msg/async/frames_v2.h
src/os/bluestore/StupidAllocator.cc
// 16 + p2align(10, 16) -> 16
// 16 + p2align(16, 16) -> 32 including 16 bytes for padding.
ceph::bufferptr out_tmp{static_cast<unsigned>(
- AES_BLOCK_LEN + p2align(in.length(), AES_BLOCK_LEN))};
+ AES_BLOCK_LEN + p2align<std::size_t>(in.length(), AES_BLOCK_LEN))};
// let's pad the data
std::uint8_t pad_len = out_tmp.length() - in.length();
if (out.buf == nullptr) {
// 16 + p2align(10, 16) -> 16
// 16 + p2align(16, 16) -> 32
- const std::size_t needed = \
- AES_BLOCK_LEN + p2align(in.length, AES_BLOCK_LEN);
- return needed;
+ return AES_BLOCK_LEN + p2align<std::size_t>(in.length, AES_BLOCK_LEN);
}
// how many bytes of in.buf hang outside the alignment boundary and how
* eg, p2align(0x1234, 0x100) == 0x1200 (0x12*align)
* eg, p2align(0x5600, 0x100) == 0x5600 (0x56*align)
*/
-template<typename T, typename U>
-constexpr inline std::make_unsigned_t<std::common_type_t<T, U>> p2align(T x, U align) {
+template<typename T>
+constexpr inline T p2align(T x, T align) {
return x & -align;
}
* eg, p2phase(0x1234, 0x100) == 0x34 (x-0x12*align)
* eg, p2phase(0x5600, 0x100) == 0x00 (x-0x56*align)
*/
-template<typename T, typename U>
-constexpr inline std::make_unsigned_t<std::common_type_t<T, U>> p2phase(T x, U align) {
+template<typename T>
+constexpr inline T p2phase(T x, T align) {
return x & (align - 1);
}
* eg, p2nphase(0x1234, 0x100) == 0xcc (0x13*align-x)
* eg, p2nphase(0x5600, 0x100) == 0x00 (0x56*align-x)
*/
-template<typename T, typename U>
-constexpr inline std::make_unsigned_t<std::common_type_t<T, U>> p2nphase(T x, U align) {
+template<typename T>
+constexpr inline T p2nphase(T x, T align) {
return -x & (align - 1);
}
* eg, p2roundup(0x1234, 0x100) == 0x1300 (0x13*align)
* eg, p2roundup(0x5600, 0x100) == 0x5600 (0x56*align)
*/
-template<typename T, typename U>
-constexpr inline std::make_unsigned_t<std::common_type_t<T, U>> p2roundup(T x, U align) {
- return (-(-(x) & -(align)));
+template<typename T>
+constexpr inline T p2roundup(T x, T align) {
+ return -(-x & -align);
}
// count trailing zeros.
auto max_bsize = std::max(wctx->target_blob_size, min_alloc_size);
auto min_off = offset >= max_bsize ? offset - max_bsize : 0;
uint32_t alloc_len = min_alloc_size;
- auto offset0 = p2align(offset, alloc_len);
+ auto offset0 = p2align<uint64_t>(offset, alloc_len);
bool any_change;
// new blob.
BlobRef b = c->new_blob();
- uint64_t b_off = p2phase(offset, alloc_len);
+ uint64_t b_off = p2phase<uint64_t>(offset, alloc_len);
uint64_t b_off0 = b_off;
_pad_zeros(&bl, &b_off0, block_size);
o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
uint64_t intervals = 0;
{
std::lock_guard<std::mutex> l(lock);
- max_intervals = p2roundup(num_free, alloc_unit) / alloc_unit;
+ max_intervals = p2roundup<uint64_t>(num_free, alloc_unit) / alloc_unit;
for (unsigned bin = 0; bin < free.size(); ++bin) {
intervals += free[bin].num_intervals();
}
auto delta_off = res.offset - offset;
if (len > delta_off) {
res.length = len - delta_off;
- res.length = p2align(res.length, min_length);
+ res.length = p2align<uint32_t>(res.length, min_length);
if (res.length) {
return res;
}
(ctx->min_affordable_len == 0 ||
(longest.length < ctx->min_affordable_len))) {
- ctx->min_affordable_len = p2align(longest.length, min_length);
+ ctx->min_affordable_len = p2align<uint32_t>(longest.length, min_length);
ctx->min_affordable_offs = longest.offset;
}
if (mode == STOP_ON_PARTIAL) {
int64_t pos = l0_pos_start;
slot_t bits = (slot_t)1 << (l0_pos_start % d0);
- while (pos < std::min(l0_pos_end, (int64_t)p2roundup(l0_pos_start, d0))) {
+ while (pos < std::min(l0_pos_end, p2roundup<int64_t>(l0_pos_start, d0))) {
l0[pos / d0] &= ~bits;
bits <<= 1;
pos++;
}
- while (pos < std::min(l0_pos_end, (int64_t)p2align(l0_pos_end, d0))) {
+ while (pos < std::min(l0_pos_end, p2align<int64_t>(l0_pos_end, d0))) {
l0[pos / d0] = all_slot_clear;
pos += d0;
}
auto pos = l0_pos_start;
slot_t bits = (slot_t)1 << (l0_pos_start % d0);
slot_t& val_s = l0[pos / d0];
- int64_t pos_e = std::min(l0_pos_end, (int64_t)p2roundup(l0_pos_start + 1, d0));
+ int64_t pos_e = std::min(l0_pos_end,
+ p2roundup<int64_t>(l0_pos_start + 1, d0));
while (pos < pos_e) {
val_s |= bits;
bits <<= 1;
pos++;
}
- pos_e = std::min(l0_pos_end, (int64_t)p2align(l0_pos_end, d0));
+ pos_e = std::min(l0_pos_end, p2align<int64_t>(l0_pos_end, d0));
auto idx = pos / d0;
while (pos < pos_e) {
l0[idx++] = all_slot_set;