From: Ilya Dryomov <idryomov@gmail.com>
Date: Tue, 26 Feb 2019 15:21:10 +0000 (+0100)
Subject: include/intarith: enforce the same type for p2*() arguments
X-Git-Tag: v13.2.6~82^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=3244c871f36ee22ee826a96f30e82a8d05c9170c;p=ceph.git

include/intarith: enforce the same type for p2*() arguments

x and align must be of the same width.  Otherwise the lack of
sign-extension can bite, e.g.:

  uint64_t x = 128000;
  uint32_t align = 32768;
  uint64_t res = p2roundup(x, align);  // res = 18446744069414715392

While the templates added in commit c06b97b3d7e3 ("include: Add
templates along side macros in intarith") are technically correct,
P2*() macros weren't meant to be used with different types.  The
Solaris kernel (which is where they originated) has P2*_TYPED()
variants for that.

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
(cherry picked from commit d0293dd2d8c0327c9d4d6b826a966dd7b18ea318)

Conflicts:
	src/librbd/io/ImageRequest.cc
	src/msg/async/frames_v2.h
	src/os/bluestore/StupidAllocator.cc
---

diff --git a/src/auth/Crypto.cc b/src/auth/Crypto.cc
index 359a994362c44..7903455d64fbd 100644
--- a/src/auth/Crypto.cc
+++ b/src/auth/Crypto.cc
@@ -233,7 +233,7 @@ public:
     // 16 + p2align(10, 16) -> 16
     // 16 + p2align(16, 16) -> 32 including 16 bytes for padding.
     ceph::bufferptr out_tmp{static_cast<unsigned>(
-      AES_BLOCK_LEN + p2align(in.length(), AES_BLOCK_LEN))};
+      AES_BLOCK_LEN + p2align<std::size_t>(in.length(), AES_BLOCK_LEN))};
 
     // let's pad the data
     std::uint8_t pad_len = out_tmp.length() - in.length();
@@ -300,9 +300,7 @@ public:
     if (out.buf == nullptr) {
       // 16 + p2align(10, 16) -> 16
       // 16 + p2align(16, 16) -> 32
-      const std::size_t needed = \
-        AES_BLOCK_LEN + p2align(in.length, AES_BLOCK_LEN);
-      return needed;
+      return AES_BLOCK_LEN + p2align<std::size_t>(in.length, AES_BLOCK_LEN);
     }
 
     // how many bytes of in.buf hang outside the alignment boundary and how
diff --git a/src/include/intarith.h b/src/include/intarith.h
index 242056e216f2e..e912cbe7b3c44 100644
--- a/src/include/intarith.h
+++ b/src/include/intarith.h
@@ -53,8 +53,8 @@ constexpr inline bool isp2(T x) {
  * eg, p2align(0x1234, 0x100) == 0x1200 (0x12*align)
  * eg, p2align(0x5600, 0x100) == 0x5600 (0x56*align)
  */
-template<typename T, typename U>
-constexpr inline std::make_unsigned_t<std::common_type_t<T, U>> p2align(T x, U align) {
+template<typename T>
+constexpr inline T p2align(T x, T align) {
   return x & -align;
 }
 
@@ -63,8 +63,8 @@ constexpr inline std::make_unsigned_t<std::common_type_t<T, U>> p2align(T x, U a
  * eg, p2phase(0x1234, 0x100) == 0x34 (x-0x12*align)
  * eg, p2phase(0x5600, 0x100) == 0x00 (x-0x56*align)
  */
-template<typename T, typename U>
-constexpr inline std::make_unsigned_t<std::common_type_t<T, U>> p2phase(T x, U align) {
+template<typename T>
+constexpr inline T p2phase(T x, T align) {
   return x & (align - 1);
 }
 
@@ -74,8 +74,8 @@ constexpr inline std::make_unsigned_t<std::common_type_t<T, U>> p2phase(T x, U a
  * eg, p2nphase(0x1234, 0x100) == 0xcc (0x13*align-x)
  * eg, p2nphase(0x5600, 0x100) == 0x00 (0x56*align-x)
  */
-template<typename T, typename U>
-constexpr inline std::make_unsigned_t<std::common_type_t<T, U>> p2nphase(T x, U align) {
+template<typename T>
+constexpr inline T p2nphase(T x, T align) {
   return -x & (align - 1);
 }
 
@@ -84,9 +84,9 @@ constexpr inline std::make_unsigned_t<std::common_type_t<T, U>> p2nphase(T x, U
  * eg, p2roundup(0x1234, 0x100) == 0x1300 (0x13*align)
  * eg, p2roundup(0x5600, 0x100) == 0x5600 (0x56*align)
  */
-template<typename T, typename U>
-constexpr inline std::make_unsigned_t<std::common_type_t<T, U>> p2roundup(T x, U align) {
-  return (-(-(x) & -(align)));
+template<typename T>
+constexpr inline T p2roundup(T x, T align) {
+  return -(-x & -align);
 }
 
 // count trailing zeros.
diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc
index 94fa4362dd58f..82e6c16105451 100644
--- a/src/os/bluestore/BlueStore.cc
+++ b/src/os/bluestore/BlueStore.cc
@@ -10382,7 +10382,7 @@ void BlueStore::_do_write_small(
   auto max_bsize = std::max(wctx->target_blob_size, min_alloc_size);
   auto min_off = offset >= max_bsize ? offset - max_bsize : 0;
   uint32_t alloc_len = min_alloc_size;
-  auto offset0 = p2align(offset, alloc_len);
+  auto offset0 = p2align<uint64_t>(offset, alloc_len);
 
   bool any_change;
 
@@ -10646,7 +10646,7 @@ void BlueStore::_do_write_small(
     // new blob.
 
     BlobRef b = c->new_blob();
-    uint64_t b_off = p2phase(offset, alloc_len);
+    uint64_t b_off = p2phase<uint64_t>(offset, alloc_len);
     uint64_t b_off0 = b_off;
     _pad_zeros(&bl, &b_off0, block_size);
     o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
diff --git a/src/os/bluestore/StupidAllocator.cc b/src/os/bluestore/StupidAllocator.cc
index 899716e014e1b..35d6d91e1cdff 100644
--- a/src/os/bluestore/StupidAllocator.cc
+++ b/src/os/bluestore/StupidAllocator.cc
@@ -256,7 +256,7 @@ double StupidAllocator::get_fragmentation(uint64_t alloc_unit)
   uint64_t intervals = 0;
   {
     std::lock_guard l(lock);
-    max_intervals = p2roundup(num_free, alloc_unit) / alloc_unit;
+    max_intervals = p2roundup<uint64_t>(num_free, alloc_unit) / alloc_unit;
     for (unsigned bin = 0; bin < free.size(); ++bin) {
       intervals += free[bin].num_intervals();
     }
diff --git a/src/os/bluestore/fastbmap_allocator_impl.cc b/src/os/bluestore/fastbmap_allocator_impl.cc
index 424f8051871d6..75b70bd582dd7 100755
--- a/src/os/bluestore/fastbmap_allocator_impl.cc
+++ b/src/os/bluestore/fastbmap_allocator_impl.cc
@@ -23,7 +23,7 @@ inline interval_t _align2units(uint64_t offset, uint64_t len, uint64_t min_lengt
   auto delta_off = res.offset - offset;
   if (len > delta_off) {
     res.length = len - delta_off;
-    res.length = p2align(res.length, min_length);
+    res.length = p2align<uint64_t>(res.length, min_length);
     if (res.length) {
       return res;
     }
@@ -190,7 +190,7 @@ void AllocatorLevel01Loose::_analyze_partials(uint64_t pos_start,
       (ctx->min_affordable_len == 0 ||
       (longest.length < ctx->min_affordable_len))) {
-      ctx->min_affordable_len = p2align(longest.length, min_length);
+      ctx->min_affordable_len = p2align<uint64_t>(longest.length, min_length);
       ctx->min_affordable_offs = longest.offset;
     }
     if (mode == STOP_ON_PARTIAL) {
@@ -288,13 +288,13 @@ void AllocatorLevel01Loose::_mark_alloc_l0(int64_t l0_pos_start,
   int64_t pos = l0_pos_start;
   slot_t bits = (slot_t)1 << (l0_pos_start % d0);
-  while (pos < std::min(l0_pos_end, (int64_t)p2roundup(l0_pos_start, d0))) {
+  while (pos < std::min(l0_pos_end, p2roundup<int64_t>(l0_pos_start, d0))) {
     l0[pos / d0] &= ~bits;
     bits <<= 1;
     pos++;
   }
 
-  while (pos < std::min(l0_pos_end, (int64_t)p2align(l0_pos_end, d0))) {
+  while (pos < std::min(l0_pos_end, p2align<int64_t>(l0_pos_end, d0))) {
     l0[pos / d0] = all_slot_clear;
     pos += d0;
   }
diff --git a/src/os/bluestore/fastbmap_allocator_impl.h b/src/os/bluestore/fastbmap_allocator_impl.h
index e490215a12d3f..8adf1bbc532a1 100755
--- a/src/os/bluestore/fastbmap_allocator_impl.h
+++ b/src/os/bluestore/fastbmap_allocator_impl.h
@@ -335,13 +335,14 @@ protected:
     auto pos = l0_pos_start;
     slot_t bits = (slot_t)1 << (l0_pos_start % d0);
     slot_t& val_s = l0[pos / d0];
-    int64_t pos_e = std::min(l0_pos_end, (int64_t)p2roundup(l0_pos_start + 1, d0));
+    int64_t pos_e = std::min(l0_pos_end,
+			     p2roundup<int64_t>(l0_pos_start + 1, d0));
     while (pos < pos_e) {
       val_s |= bits;
       bits <<= 1;
       pos++;
     }
-    pos_e = std::min(l0_pos_end, (int64_t)p2align(l0_pos_end, d0));
+    pos_e = std::min(l0_pos_end, p2align<int64_t>(l0_pos_end, d0));
     auto idx = pos / d0;
     while (pos < pos_e) {
       l0[idx++] = all_slot_set;