From 28d35abf894caccd6ccbf7abb3ad1e0f116643c7 Mon Sep 17 00:00:00 2001 From: "Adam C. Emerson" Date: Wed, 10 Jan 2018 22:24:46 -0500 Subject: [PATCH] rados: Switch MIN/MAX for std::min/max and use intarith templates Signed-off-by: Adam C. Emerson --- src/auth/cephx/CephxKeyServer.cc | 2 +- src/common/bit_vector.hpp | 4 +- src/common/buffer.cc | 18 ++-- src/compressor/zlib/ZlibCompressor.cc | 2 +- src/crush/CrushCompiler.cc | 2 +- src/journal/Journaler.cc | 4 +- src/librados/librados.cc | 4 +- src/mds/Locker.cc | 2 +- src/mon/OSDMonitor.cc | 22 ++--- src/mon/PGMap.cc | 12 +-- src/msg/async/AsyncConnection.cc | 8 +- src/msg/async/PosixStack.cc | 2 +- src/msg/simple/Pipe.cc | 10 +-- src/os/bluestore/BitAllocator.cc | 2 +- src/os/bluestore/BitMapAllocator.cc | 18 ++-- src/os/bluestore/BitmapFreelistManager.cc | 4 +- src/os/bluestore/BlueFS.cc | 28 +++--- src/os/bluestore/BlueStore.cc | 105 +++++++++++----------- src/os/bluestore/StupidAllocator.cc | 2 +- src/os/bluestore/bluestore_types.cc | 10 +-- src/os/bluestore/bluestore_types.h | 16 ++-- src/os/filestore/FileJournal.cc | 6 +- src/os/filestore/FileJournal.h | 2 +- src/os/kstore/KStore.cc | 10 +-- src/os/memstore/MemStore.cc | 14 +-- src/osd/ECBackend.h | 2 +- src/osd/OSD.cc | 30 +++---- src/osd/PG.cc | 20 ++--- src/osd/PGLog.cc | 10 +-- src/osd/PrimaryLogPG.cc | 76 ++++++++-------- src/osd/ReplicatedBackend.cc | 2 +- src/osd/osd_types.cc | 2 +- src/osdc/Striper.cc | 8 +- src/test/compressor/compressor_example.h | 2 +- src/test/compressor/test_compression.cc | 2 +- src/test/librados/misc.cc | 3 +- src/test/objectstore/store_test.cc | 16 ++-- src/test/osdc/object_cacher_stress.cc | 4 +- 38 files changed, 248 insertions(+), 238 deletions(-) diff --git a/src/auth/cephx/CephxKeyServer.cc b/src/auth/cephx/CephxKeyServer.cc index 492c99f913b09..588a0d9ffa6aa 100644 --- a/src/auth/cephx/CephxKeyServer.cc +++ b/src/auth/cephx/CephxKeyServer.cc @@ -162,7 +162,7 @@ bool KeyServer::_check_rotating_secrets() ldout(cct, 10) << __func__ << " added " << added << dendl; data.rotating_ver++; //data.next_rotating_time = ceph_clock_now(cct); - //data.next_rotating_time += MIN(cct->_conf->auth_mon_ticket_ttl, cct->_conf->auth_service_ticket_ttl); + //data.next_rotating_time += std::min(cct->_conf->auth_mon_ticket_ttl, cct->_conf->auth_service_ticket_ttl); _dump_rotating_secrets(); return true; } diff --git a/src/common/bit_vector.hpp b/src/common/bit_vector.hpp index 859df6137d452..88a81296bc5e6 100644 --- a/src/common/bit_vector.hpp +++ b/src/common/bit_vector.hpp @@ -310,7 +310,7 @@ void BitVector<_b>::encode_data(bufferlist& bl, uint64_t byte_offset, uint64_t end_offset = byte_offset + byte_length; while (byte_offset < end_offset) { - uint64_t len = MIN(BLOCK_SIZE, end_offset - byte_offset); + uint64_t len = std::min(BLOCK_SIZE, end_offset - byte_offset); bufferlist bit; bit.substr_of(m_data, byte_offset, len); @@ -339,7 +339,7 @@ void BitVector<_b>::decode_data(bufferlist::iterator& it, uint64_t byte_offset) } while (byte_offset < end_offset) { - uint64_t len = MIN(BLOCK_SIZE, end_offset - byte_offset); + uint64_t len = std::min(BLOCK_SIZE, end_offset - byte_offset); bufferptr ptr; it.copy_deep(len, ptr); diff --git a/src/common/buffer.cc b/src/common/buffer.cc index 6e0cca833568d..11196de758c47 100644 --- a/src/common/buffer.cc +++ b/src/common/buffer.cc @@ -39,7 +39,7 @@ using namespace ceph; -#define CEPH_BUFFER_ALLOC_UNIT (MIN(CEPH_PAGE_SIZE, 4096)) +#define CEPH_BUFFER_ALLOC_UNIT (std::min(CEPH_PAGE_SIZE, 4096u)) #define CEPH_BUFFER_APPEND_SIZE 
(CEPH_BUFFER_ALLOC_UNIT - sizeof(raw_combined)) #ifdef BUFFER_DEBUG @@ -296,9 +296,9 @@ public: int mempool = mempool::mempool_buffer_anon) { if (!align) align = sizeof(size_t); - size_t rawlen = ROUND_UP_TO(sizeof(buffer::raw_combined), + size_t rawlen = round_up_to(sizeof(buffer::raw_combined), alignof(buffer::raw_combined)); - size_t datalen = ROUND_UP_TO(len, alignof(buffer::raw_combined)); + size_t datalen = round_up_to(len, alignof(buffer::raw_combined)); #ifdef DARWIN char *ptr = (char *) valloc(rawlen + datalen); @@ -1355,7 +1355,7 @@ public: } } *data = p->c_str() + p_off; - size_t l = MIN(p->length() - p_off, want); + size_t l = std::min(p->length() - p_off, want); p_off += l; if (p_off == p->length()) { ++p; @@ -1369,7 +1369,7 @@ public: uint32_t buffer::list::iterator_impl::crc32c( size_t length, uint32_t crc) { - length = MIN( length, get_remaining()); + length = std::min(length, get_remaining()); while (length > 0) { const char *p; size_t l = get_ptr_and_advance(length, &p); @@ -1763,7 +1763,7 @@ public: if (max_buffers && _buffers.size() > max_buffers && _len > (max_buffers * align_size)) { - align_size = ROUND_UP_TO(ROUND_UP_TO(_len, max_buffers) / max_buffers, align_size); + align_size = round_up_to(round_up_to(_len, max_buffers) / max_buffers, align_size); } std::list::iterator p = _buffers.begin(); while (p != _buffers.end()) { @@ -1940,8 +1940,8 @@ public: // make a new append_buffer. fill out a complete page, factoring in the // raw_combined overhead. - size_t need = ROUND_UP_TO(len, sizeof(size_t)) + sizeof(raw_combined); - size_t alen = ROUND_UP_TO(need, CEPH_BUFFER_ALLOC_UNIT) - + size_t need = round_up_to(len, sizeof(size_t)) + sizeof(raw_combined); + size_t alen = round_up_to(need, CEPH_BUFFER_ALLOC_UNIT) - sizeof(raw_combined); append_buffer = raw_combined::create(alen, 0, get_mempool()); append_buffer.set_length(0); // unused, so far. @@ -2457,7 +2457,7 @@ int buffer::list::write_fd(int fd, uint64_t offset) const while (left_pbrs) { ssize_t bytes = 0; unsigned iovlen = 0; - uint64_t size = MIN(left_pbrs, IOV_MAX); + uint64_t size = std::min(left_pbrs, IOV_MAX); left_pbrs -= size; while (size > 0) { iov[iovlen].iov_base = (void *)p->c_str(); diff --git a/src/compressor/zlib/ZlibCompressor.cc b/src/compressor/zlib/ZlibCompressor.cc index 492e6328d3615..a6a9722bfc667 100644 --- a/src/compressor/zlib/ZlibCompressor.cc +++ b/src/compressor/zlib/ZlibCompressor.cc @@ -196,7 +196,7 @@ int ZlibCompressor::decompress(bufferlist::iterator &p, size_t compressed_size, return -1; } - size_t remaining = MIN(p.get_remaining(), compressed_size); + size_t remaining = std::min(p.get_remaining(), compressed_size); while(remaining) { long unsigned int len = p.get_ptr_and_advance(remaining, &c_in); diff --git a/src/crush/CrushCompiler.cc b/src/crush/CrushCompiler.cc index 6ac892a15be40..47fe77629c706 100644 --- a/src/crush/CrushCompiler.cc +++ b/src/crush/CrushCompiler.cc @@ -647,7 +647,7 @@ int CrushCompiler::parse_bucket(iter_t const& i) // now do the items. 
if (!used_items.empty()) - size = MAX(size, *used_items.rbegin()); + size = std::max(size, *used_items.rbegin()); vector items(size); vector weights(size); diff --git a/src/journal/Journaler.cc b/src/journal/Journaler.cc index d9517f0621c15..5b580c50005db 100644 --- a/src/journal/Journaler.cc +++ b/src/journal/Journaler.cc @@ -419,8 +419,8 @@ uint64_t Journaler::get_max_append_size() const { uint64_t max_payload_size = m_metadata->get_object_size() - Entry::get_fixed_size(); if (m_metadata->get_settings().max_payload_bytes > 0) { - max_payload_size = MIN(max_payload_size, - m_metadata->get_settings().max_payload_bytes); + max_payload_size = std::min(max_payload_size, + m_metadata->get_settings().max_payload_bytes); } return max_payload_size; } diff --git a/src/librados/librados.cc b/src/librados/librados.cc index 329b145cd2da2..caa721b68404b 100644 --- a/src/librados/librados.cc +++ b/src/librados/librados.cc @@ -2533,7 +2533,7 @@ int librados::Rados::get_pool_stats(std::list& v, ++p) { pool_stat_t& pv = result[p->first]; object_stat_sum_t *sum = &p->second.stats.sum; - pv.num_kb = SHIFT_ROUND_UP(sum->num_bytes, 10); + pv.num_kb = shift_round_up(sum->num_bytes, 10); pv.num_bytes = sum->num_bytes; pv.num_objects = sum->num_objects; pv.num_object_clones = sum->num_object_clones; @@ -3652,7 +3652,7 @@ extern "C" int rados_ioctx_pool_stat(rados_ioctx_t io, struct rados_pool_stat_t } ::pool_stat_t& r = rawresult[pool_name]; - stats->num_kb = SHIFT_ROUND_UP(r.stats.sum.num_bytes, 10); + stats->num_kb = shift_round_up(r.stats.sum.num_bytes, 10); stats->num_bytes = r.stats.sum.num_bytes; stats->num_objects = r.stats.sum.num_objects; stats->num_object_clones = r.stats.sum.num_object_clones; diff --git a/src/mds/Locker.cc b/src/mds/Locker.cc index 73661a8d60b8c..f1f5bcd713d57 100644 --- a/src/mds/Locker.cc +++ b/src/mds/Locker.cc @@ -2301,7 +2301,7 @@ uint64_t Locker::calc_new_max_size(inode_t *pi, uint64_t size) max_inc *= pi->layout.object_size; new_max = std::min(new_max, size + max_inc); } - return ROUND_UP_TO(new_max, pi->get_layout_size_increment()); + return round_up_to(new_max, pi->get_layout_size_increment()); } void Locker::calc_new_client_ranges(CInode *in, uint64_t size, diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc index aea742fdc6c7a..bb4bfd5e35682 100644 --- a/src/mon/OSDMonitor.cc +++ b/src/mon/OSDMonitor.cc @@ -688,7 +688,7 @@ OSDMonitor::update_pending_pgs(const OSDMap::Incremental& inc) } // process queue - unsigned max = MAX(1, g_conf->mon_osd_max_creating_pgs); + unsigned max = std::max(1, g_conf->mon_osd_max_creating_pgs); const auto total = pending_creatings.pgs.size(); while (pending_creatings.pgs.size() < max && !pending_creatings.queue.empty()) { @@ -699,7 +699,7 @@ OSDMonitor::update_pending_pgs(const OSDMap::Incremental& inc) << " modified " << p->second.modified << " [" << p->second.start << "-" << p->second.end << ")" << dendl; - int n = MIN(max - pending_creatings.pgs.size(), + int n = std::min(max - pending_creatings.pgs.size(), p->second.end - p->second.start); ps_t first = p->second.start; ps_t end = first + n; @@ -1414,7 +1414,7 @@ bool OSDMonitor::is_pool_currently_all_bluestore(int64_t pool_id, // just check a few pgs for efficiency - this can't give a guarantee anyway, // since filestore osds could always join the pool later set checked_osds; - for (unsigned ps = 0; ps < MIN(8, pool.get_pg_num()); ++ps) { + for (unsigned ps = 0; ps < std::min(8u, pool.get_pg_num()); ++ps) { vector up, acting; pg_t pgid(ps, pool_id, -1); 
osdmap.pg_to_up_acting_osds(pgid, up, acting); @@ -1680,14 +1680,14 @@ bool OSDMonitor::preprocess_get_osdmap(MonOpRequestRef op) epoch_t first = get_first_committed(); epoch_t last = osdmap.get_epoch(); int max = g_conf->osd_map_message_max; - for (epoch_t e = MAX(first, m->get_full_first()); - e <= MIN(last, m->get_full_last()) && max > 0; + for (epoch_t e = std::max(first, m->get_full_first()); + e <= std::min(last, m->get_full_last()) && max > 0; ++e, --max) { int r = get_version_full(e, reply->maps[e]); assert(r >= 0); } - for (epoch_t e = MAX(first, m->get_inc_first()); - e <= MIN(last, m->get_inc_last()) && max > 0; + for (epoch_t e = std::max(first, m->get_inc_first()); + e <= std::min(last, m->get_inc_last()) && max > 0; ++e, --max) { int r = get_version(e, reply->incremental_maps[e]); assert(r >= 0); @@ -3070,8 +3070,8 @@ void OSDMonitor::send_incremental(epoch_t first, } while (first <= osdmap.get_epoch()) { - epoch_t last = MIN(first + g_conf->osd_map_message_max - 1, - osdmap.get_epoch()); + epoch_t last = std::min(first + g_conf->osd_map_message_max - 1, + osdmap.get_epoch()); MOSDMap *m = build_incremental(first, last); if (req) { @@ -5517,7 +5517,7 @@ int OSDMonitor::prepare_pool_size(const unsigned pool_type, err = get_erasure_code(erasure_code_profile, &erasure_code, ss); if (err == 0) { *size = erasure_code->get_chunk_count(); - *min_size = MIN(erasure_code->get_data_chunk_count() + 1, *size); + *min_size = std::min(erasure_code->get_data_chunk_count() + 1, *size); } } break; @@ -6025,7 +6025,7 @@ int OSDMonitor::prepare_command_pool_set(map &cmdmap, ss << "splits in cache pools must be followed by scrubs and leave sufficient free space to avoid overfilling. use --yes-i-really-mean-it to force."; return -EPERM; } - int expected_osds = MIN(p.get_pg_num(), osdmap.get_num_osds()); + int expected_osds = std::min(p.get_pg_num(), osdmap.get_num_osds()); int64_t new_pgs = n - p.get_pg_num(); if (new_pgs > g_conf->mon_osd_max_split_count * expected_osds) { ss << "specified pg_num " << n << " is too large (creating " diff --git a/src/mon/PGMap.cc b/src/mon/PGMap.cc index 13c2af0b5e6d1..884e99d11f402 100644 --- a/src/mon/PGMap.cc +++ b/src/mon/PGMap.cc @@ -273,7 +273,7 @@ void PGMapDigest::print_summary(Formatter *f, ostream *out) const { std::stringstream ss; ss << p->first; - max_width = MAX(ss.str().size(), max_width); + max_width = std::max(ss.str().size(), max_width); } for (multimap::reverse_iterator p = state_by_count.rbegin(); @@ -875,7 +875,7 @@ void PGMapDigest::dump_object_stat_sum( } if (f) { - f->dump_int("kb_used", SHIFT_ROUND_UP(sum.num_bytes, 10)); + f->dump_int("kb_used", shift_round_up(sum.num_bytes, 10)); f->dump_int("bytes_used", sum.num_bytes); f->dump_format_unquoted("percent_used", "%.2f", (used*100)); f->dump_unsigned("max_avail", avail / raw_used_rate); @@ -946,7 +946,7 @@ int64_t PGMap::get_rule_avail(const OSDMap& osdmap, int ruleno) const } double unusable = (double)osd_info->second.kb * (1.0 - fratio); - double avail = MAX(0.0, (double)osd_info->second.kb_avail - unusable); + double avail = std::max(0.0, (double)osd_info->second.kb_avail - unusable); avail *= 1024.0; int64_t proj = (int64_t)(avail / (double)p->second); if (min < 0 || proj < min) { @@ -3506,7 +3506,7 @@ int reweight::by_utilization( average_util = (double)num_pg_copies / weight_sum; } else { // by osd utilization - int num_osd = MAX(1, pgm.osd_stat.size()); + int num_osd = std::max(1, pgm.osd_stat.size()); if ((uint64_t)pgm.osd_sum.kb * 1024 / num_osd < 
g_conf->mon_reweight_min_bytes_per_osd) { *ss << "Refusing to reweight: we only have " << pgm.osd_sum.kb @@ -3599,7 +3599,7 @@ int reweight::by_utilization( // to represent e.g. differing storage capacities unsigned new_weight = (unsigned)((average_util / util) * (float)weight); if (weight > max_change) - new_weight = MAX(new_weight, weight - max_change); + new_weight = std::max(new_weight, weight - max_change); new_weights->insert({p.first, new_weight}); if (f) { f->open_object_section("osd"); @@ -3618,7 +3618,7 @@ int reweight::by_utilization( if (!no_increasing && util <= underload_util) { // assign a higher weight.. if we can. unsigned new_weight = (unsigned)((average_util / util) * (float)weight); - new_weight = MIN(new_weight, weight + max_change); + new_weight = std::min(new_weight, weight + max_change); if (new_weight > 0x10000) new_weight = 0x10000; if (new_weight > weight) { diff --git a/src/msg/async/AsyncConnection.cc b/src/msg/async/AsyncConnection.cc index 180d965d7256c..f639c11ddd1df 100644 --- a/src/msg/async/AsyncConnection.cc +++ b/src/msg/async/AsyncConnection.cc @@ -107,7 +107,7 @@ static void alloc_aligned_buffer(bufferlist& data, unsigned len, unsigned off) if (off & ~CEPH_PAGE_MASK) { // head alloc_len += CEPH_PAGE_SIZE; - head = MIN(CEPH_PAGE_SIZE - (off & ~CEPH_PAGE_MASK), left); + head = std::min(CEPH_PAGE_SIZE - (off & ~CEPH_PAGE_MASK), left); left -= head; } alloc_len += left; @@ -124,7 +124,7 @@ AsyncConnection::AsyncConnection(CephContext *cct, AsyncMessenger *m, DispatchQu state(STATE_NONE), state_after_send(STATE_NONE), port(-1), dispatch_queue(q), can_write(WriteStatus::NOWRITE), keepalive(false), recv_buf(NULL), - recv_max_prefetch(MAX(msgr->cct->_conf->ms_tcp_prefetch_max_size, TCP_PREFETCH_MIN_SIZE)), + recv_max_prefetch(std::max(msgr->cct->_conf->ms_tcp_prefetch_max_size, TCP_PREFETCH_MIN_SIZE)), recv_start(0), recv_end(0), last_active(ceph::coarse_mono_clock::now()), inactive_timeout_us(cct->_conf->ms_tcp_read_timeout*1000*1000), @@ -255,7 +255,7 @@ ssize_t AsyncConnection::read_until(unsigned len, char *p) ssize_t r = 0; uint64_t left = len - state_offset; if (recv_end > recv_start) { - uint64_t to_read = MIN(recv_end - recv_start, left); + uint64_t to_read = std::min(recv_end - recv_start, left); memcpy(p, recv_buf+recv_start, to_read); recv_start += to_read; left -= to_read; @@ -619,7 +619,7 @@ void AsyncConnection::process() { while (msg_left > 0) { bufferptr bp = data_blp.get_current_ptr(); - unsigned read = MIN(bp.length(), msg_left); + unsigned read = std::min(bp.length(), msg_left); r = read_until(read, bp.c_str()); if (r < 0) { ldout(async_msgr->cct, 1) << __func__ << " read data error " << dendl; diff --git a/src/msg/async/PosixStack.cc b/src/msg/async/PosixStack.cc index 2e432b8691993..e966621c7adc4 100644 --- a/src/msg/async/PosixStack.cc +++ b/src/msg/async/PosixStack.cc @@ -116,7 +116,7 @@ class PosixConnectedSocketImpl final : public ConnectedSocketImpl { while (left_pbrs) { struct msghdr msg; struct iovec msgvec[IOV_MAX]; - uint64_t size = MIN(left_pbrs, IOV_MAX); + uint64_t size = std::min(left_pbrs, IOV_MAX); left_pbrs -= size; memset(&msg, 0, sizeof(msg)); msg.msg_iovlen = size; diff --git a/src/msg/simple/Pipe.cc b/src/msg/simple/Pipe.cc index a5dd7e7998aa0..2c8b1ae63d3df 100644 --- a/src/msg/simple/Pipe.cc +++ b/src/msg/simple/Pipe.cc @@ -1999,7 +1999,7 @@ static void alloc_aligned_buffer(bufferlist& data, unsigned len, unsigned off) if (off & ~CEPH_PAGE_MASK) { // head unsigned head = 0; - head = MIN(CEPH_PAGE_SIZE - (off & 
~CEPH_PAGE_MASK), left); + head = std::min(CEPH_PAGE_SIZE - (off & ~CEPH_PAGE_MASK), left); data.push_back(buffer::create(head)); left -= head; } @@ -2139,7 +2139,7 @@ int Pipe::read_message(Message **pm, AuthSessionHandler* auth_handler) } } bufferptr bp = blp.get_current_ptr(); - int read = MIN(bp.length(), left); + int read = std::min(bp.length(), left); ldout(msgr->cct,20) << "reader reading nonblocking into " << (void*)bp.c_str() << " len " << bp.length() << dendl; ssize_t got = tcp_read_nonblocking(bp.c_str(), read); ldout(msgr->cct,30) << "reader read " << got << " of " << read << dendl; @@ -2374,7 +2374,7 @@ int Pipe::write_message(const ceph_msg_header& header, const ceph_msg_footer& fo unsigned left = blist.length(); while (left > 0) { - unsigned donow = MIN(left, pb->length()-b_off); + unsigned donow = std::min(left, pb->length()-b_off); if (donow == 0) { ldout(msgr->cct,0) << "donow = " << donow << " left " << left << " pb->length " << pb->length() << " b_off " << b_off << dendl; @@ -2538,7 +2538,7 @@ ssize_t Pipe::buffered_recv(char *buf, size_t len, int flags) size_t left = len; ssize_t total_recv = 0; if (recv_len > recv_ofs) { - int to_read = MIN(recv_len - recv_ofs, left); + int to_read = std::min(recv_len - recv_ofs, left); memcpy(buf, &recv_buf[recv_ofs], to_read); recv_ofs += to_read; left -= to_read; @@ -2573,7 +2573,7 @@ ssize_t Pipe::buffered_recv(char *buf, size_t len, int flags) } recv_len = (size_t)got; - got = MIN(left, (size_t)got); + got = std::min(left, (size_t)got); memcpy(buf, recv_buf, got); recv_ofs = got; total_recv += got; diff --git a/src/os/bluestore/BitAllocator.cc b/src/os/bluestore/BitAllocator.cc index 8ab77ae2a018e..03ac942c88901 100644 --- a/src/os/bluestore/BitAllocator.cc +++ b/src/os/bluestore/BitAllocator.cc @@ -1122,7 +1122,7 @@ void BitAllocator::init_check(int64_t total_blocks, int64_t zone_size_block, unaligned_blocks = total_blocks % zone_size_block; m_extra_blocks = unaligned_blocks? zone_size_block - unaligned_blocks: 0; - total_blocks = ROUND_UP_TO(total_blocks, zone_size_block); + total_blocks = round_up_to(total_blocks, zone_size_block); m_alloc_mode = mode; m_is_stats_on = stats_on; diff --git a/src/os/bluestore/BitMapAllocator.cc b/src/os/bluestore/BitMapAllocator.cc index a6646bb085fc4..46dc939eaecf7 100644 --- a/src/os/bluestore/BitMapAllocator.cc +++ b/src/os/bluestore/BitMapAllocator.cc @@ -22,34 +22,34 @@ BitMapAllocator::BitMapAllocator(CephContext* cct, int64_t device_size, int64_t block_size) : cct(cct) { - if (!ISP2(block_size)) { + if (!isp2(block_size)) { derr << __func__ << " block_size " << block_size << " not power of 2 aligned!" << dendl; - assert(ISP2(block_size)); + assert(isp2(block_size)); return; } int64_t zone_size_blks = cct->_conf->bluestore_bitmapallocator_blocks_per_zone; - if (!ISP2(zone_size_blks)) { + if (!isp2(zone_size_blks)) { derr << __func__ << " zone_size " << zone_size_blks << " not power of 2 aligned!" << dendl; - assert(ISP2(zone_size_blks)); + assert(isp2(zone_size_blks)); return; } int64_t span_size = cct->_conf->bluestore_bitmapallocator_span_size; - if (!ISP2(span_size)) { + if (!isp2(span_size)) { derr << __func__ << " span_size " << span_size << " not power of 2 aligned!" 
<< dendl; - assert(ISP2(span_size)); + assert(isp2(span_size)); return; } m_block_size = block_size; - m_total_size = P2ALIGN(device_size, block_size); + m_total_size = p2align(device_size, block_size); m_bit_alloc = new BitAllocator(cct, device_size / block_size, zone_size_blks, CONCURRENT, true); if (!m_bit_alloc) { @@ -182,7 +182,7 @@ void BitMapAllocator::init_add_free(uint64_t offset, uint64_t length) << dendl; uint64_t size = m_bit_alloc->size() * m_block_size; - uint64_t offset_adj = ROUND_UP_TO(offset, m_block_size); + uint64_t offset_adj = round_up_to(offset, m_block_size); uint64_t length_adj = ((length - (offset_adj - offset)) / m_block_size) * m_block_size; @@ -203,7 +203,7 @@ void BitMapAllocator::init_rm_free(uint64_t offset, uint64_t length) // we use the same adjustment/alignment that init_add_free does // above so that we can yank back some of the space. - uint64_t offset_adj = ROUND_UP_TO(offset, m_block_size); + uint64_t offset_adj = round_up_to(offset, m_block_size); uint64_t length_adj = ((length - (offset_adj - offset)) / m_block_size) * m_block_size; diff --git a/src/os/bluestore/BitmapFreelistManager.cc b/src/os/bluestore/BitmapFreelistManager.cc index 4678abf3ef78d..0017df01a8cf3 100644 --- a/src/os/bluestore/BitmapFreelistManager.cc +++ b/src/os/bluestore/BitmapFreelistManager.cc @@ -62,8 +62,8 @@ int BitmapFreelistManager::create(uint64_t new_size, uint64_t granularity, KeyValueDB::Transaction txn) { bytes_per_block = granularity; - assert(ISP2(bytes_per_block)); - size = P2ALIGN(new_size, bytes_per_block); + assert(isp2(bytes_per_block)); + size = p2align(new_size, bytes_per_block); blocks_per_key = cct->_conf->bluestore_freelist_blocks_per_key; _init_misc(); diff --git a/src/os/bluestore/BlueFS.cc b/src/os/bluestore/BlueFS.cc index 5644c3eb23a17..2e7c13c7c0f29 100644 --- a/src/os/bluestore/BlueFS.cc +++ b/src/os/bluestore/BlueFS.cc @@ -197,7 +197,7 @@ int BlueFS::reclaim_blocks(unsigned id, uint64_t want, extents); assert(got != 0); if (got < (int64_t)want) { - alloc[id]->unreserve(want - MAX(0, got)); + alloc[id]->unreserve(want - std::max<int64_t>(0, got)); } if (got < 0) { derr << __func__ << " failed to allocate space to return to bluestore" @@ -575,7 +575,7 @@ int BlueFS::_replay(bool noop, bool to_stdout) decode(uuid, p); decode(seq, p); if (len + 6 > bl.length()) { - more = ROUND_UP_TO(len + 6 - bl.length(), super.block_size); + more = round_up_to(len + 6 - bl.length(), super.block_size); } } if (uuid != super.uuid) { @@ -1005,7 +1005,7 @@ int BlueFS::_read_random( while (len > 0) { uint64_t x_off = 0; auto p = h->file->fnode.seek(off, &x_off); - uint64_t l = MIN(p->length - x_off, len); + uint64_t l = std::min(p->length - x_off, len); dout(20) << __func__ << " read buffered 0x" << std::hex << x_off << "~" << l << std::dec << " of " << *p << dendl; @@ -1057,11 +1057,11 @@ int BlueFS::_read( buf->bl_off = off & super.block_mask(); uint64_t x_off = 0; auto p = h->file->fnode.seek(buf->bl_off, &x_off); - uint64_t want = ROUND_UP_TO(len + (off & ~super.block_mask()), + uint64_t want = round_up_to(len + (off & ~super.block_mask()), super.block_size); - want = MAX(want, buf->max_prefetch); - uint64_t l = MIN(p->length - x_off, want); - uint64_t eof_offset = ROUND_UP_TO(h->file->fnode.size, super.block_size); + want = std::max(want, buf->max_prefetch); + uint64_t l = std::min(p->length - x_off, want); + uint64_t eof_offset = round_up_to(h->file->fnode.size, super.block_size); if (!h->ignore_eof && buf->bl_off + l > eof_offset) { l = eof_offset - buf->bl_off; @@ -1077,7 
+1077,7 @@ int BlueFS::_read( dout(20) << __func__ << " left 0x" << std::hex << left << " len 0x" << len << std::dec << dendl; - int r = MIN(len, left); + int r = std::min(len, left); if (outbl) { bufferlist t; t.substr_of(buf->bl, off - buf->bl_off, r); @@ -1115,12 +1115,12 @@ void BlueFS::_invalidate_cache(FileRef f, uint64_t offset, uint64_t length) << dendl; if (offset & ~super.block_mask()) { offset &= super.block_mask(); - length = ROUND_UP_TO(length, super.block_size); + length = round_up_to(length, super.block_size); } uint64_t x_off = 0; auto p = f->fnode.seek(offset, &x_off); while (length > 0 && p != f->fnode.extents.end()) { - uint64_t x_len = MIN(p->length - x_off, length); + uint64_t x_len = std::min(p->length - x_off, length); bdev[p->bdev]->invalidate_cache(p->offset + x_off, x_len); dout(20) << __func__ << " 0x" << std::hex << x_off << "~" << x_len << std:: dec << " of " << *p << dendl; @@ -1139,7 +1139,7 @@ uint64_t BlueFS::_estimate_log_size() size += p.num_intervals() * (1 + 1 + sizeof(uint64_t) * 2); size += dir_map.size() + (1 + avg_dir_size); size += file_map.size() * (1 + avg_dir_size + avg_file_size); - return ROUND_UP_TO(size, super.block_size); + return round_up_to(size, super.block_size); } void BlueFS::compact_log() @@ -1337,7 +1337,7 @@ void BlueFS::_compact_log_async(std::unique_lock& l) _compact_log_dump_metadata(&t); // conservative estimate for final encoded size - new_log_jump_to = ROUND_UP_TO(t.op_bl.length() + super.block_size * 2, + new_log_jump_to = round_up_to(t.op_bl.length() + super.block_size * 2, cct->_conf->bluefs_alloc_size); t.op_jump(log_seq, new_log_jump_to); @@ -1729,7 +1729,7 @@ int BlueFS::_flush_range(FileWriter *h, uint64_t offset, uint64_t length) uint64_t bloff = 0; while (length > 0) { - uint64_t x_len = MIN(p->length - x_off, length); + uint64_t x_len = std::min(p->length - x_off, length); bufferlist t; t.substr_of(bl, bloff, x_len); unsigned tail = x_len & ~super.block_mask(); @@ -1932,7 +1932,7 @@ int BlueFS::_allocate(uint8_t id, uint64_t len, assert(id < alloc.size()); uint64_t min_alloc_size = cct->_conf->bluefs_alloc_size; - uint64_t left = ROUND_UP_TO(len, min_alloc_size); + uint64_t left = round_up_to(len, min_alloc_size); int r = -ENOSPC; int64_t alloc_len = 0; AllocExtentVector extents; diff --git a/src/os/bluestore/BlueStore.cc b/src/os/bluestore/BlueStore.cc index f66855a3c3d94..b65d5cf3d5929 100644 --- a/src/os/bluestore/BlueStore.cc +++ b/src/os/bluestore/BlueStore.cc @@ -588,8 +588,8 @@ void BlueStore::GarbageCollector::process_protrusive_extents( { assert(start_offset <= start_touch_offset && end_offset>= end_touch_offset); - uint64_t lookup_start_offset = P2ALIGN(start_offset, min_alloc_size); - uint64_t lookup_end_offset = ROUND_UP_TO(end_offset, min_alloc_size); + uint64_t lookup_start_offset = p2align(start_offset, min_alloc_size); + uint64_t lookup_end_offset = round_up_to(end_offset, min_alloc_size); dout(30) << __func__ << " (hex): [" << std::hex << lookup_start_offset << ", " << lookup_end_offset @@ -681,7 +681,7 @@ void BlueStore::GarbageCollector::process_protrusive_extents( if (bi.referenced_bytes == 0) { uint64_t len_on_disk = b_it->first->get_blob().get_ondisk_length(); int64_t blob_expected_for_release = - ROUND_UP_TO(len_on_disk, min_alloc_size) / min_alloc_size; + round_up_to(len_on_disk, min_alloc_size) / min_alloc_size; dout(30) << __func__ << " " << *(b_it->first) << " expected4release=" << blob_expected_for_release @@ -1378,7 +1378,7 @@ void BlueStore::BufferSpace::read( if (b->is_writing() || 
b->is_clean()) { if (b->offset < offset) { uint32_t skip = offset - b->offset; - uint32_t l = MIN(length, b->length - skip); + uint32_t l = std::min(length, b->length - skip); res[offset].substr_of(b->data, skip, l); res_intervals.insert(offset, l); offset += l; @@ -1854,14 +1854,14 @@ bool BlueStore::Blob::can_reuse_blob(uint32_t min_alloc_size, uint32_t new_blen = blen; // make sure target_blob_size isn't less than current blob len - target_blob_size = MAX(blen, target_blob_size); + target_blob_size = std::max(blen, target_blob_size); if (b_offset >= blen) { // new data totally stands out of the existing blob new_blen = end; } else { // new data overlaps with the existing blob - new_blen = MAX(blen, end); + new_blen = std::max(blen, end); uint32_t overlap = 0; if (new_blen > blen) { @@ -2289,7 +2289,7 @@ void BlueStore::ExtentMap::reshard( unsigned target = cct->_conf->bluestore_extent_map_shard_target_size; unsigned slop = target * cct->_conf->bluestore_extent_map_shard_target_size_slop; - unsigned extent_avg = bytes / MAX(1, extents); + unsigned extent_avg = bytes / std::max(1u, extents); dout(20) << __func__ << " extent_avg " << extent_avg << ", target " << target << ", slop " << slop << dendl; @@ -3089,8 +3089,8 @@ bool BlueStore::WriteContext::has_conflict( assert((loffs_end % min_alloc_size) == 0); for (auto w : writes) { if (b == w.b) { - auto loffs2 = P2ALIGN(w.logical_offset, min_alloc_size); - auto loffs2_end = P2ROUNDUP(w.logical_offset + w.length0, min_alloc_size); + auto loffs2 = p2align(w.logical_offset, min_alloc_size); + auto loffs2_end = p2roundup(w.logical_offset + w.length0, min_alloc_size); if ((loffs <= loffs2 && loffs_end > loffs2) || (loffs >= loffs2 && loffs < loffs2_end)) { return true; @@ -4350,14 +4350,15 @@ int BlueStore::_open_fm(bool create) // allocate superblock reserved space. note that we do not mark // bluefs space as allocated in the freelist; we instead rely on // bluefs_extents. 
- uint64_t reserved = ROUND_UP_TO(MAX(SUPER_RESERVED, min_alloc_size), - min_alloc_size); + uint64_t reserved = round_up_to( + std::max<uint64_t>(SUPER_RESERVED, min_alloc_size), + min_alloc_size); fm->allocate(0, reserved, t); if (cct->_conf->bluestore_bluefs) { assert(bluefs_extents.num_intervals() == 1); interval_set<uint64_t>::iterator p = bluefs_extents.begin(); - reserved = ROUND_UP_TO(p.get_start() + p.get_len(), min_alloc_size); + reserved = round_up_to(p.get_start() + p.get_len(), min_alloc_size); dout(20) << __func__ << " reserved 0x" << std::hex << reserved << std::dec << " for bluefs" << dendl; bufferlist bl; @@ -4372,7 +4373,7 @@ int BlueStore::_open_fm(bool create) dout(1) << __func__ << " pre-fragmenting freespace, using " << cct->_conf->bluestore_debug_prefill << " with max free extent " << cct->_conf->bluestore_debug_prefragment_max << dendl; - uint64_t start = P2ROUNDUP(reserved, min_alloc_size); + uint64_t start = p2roundup(reserved, min_alloc_size); uint64_t max_b = cct->_conf->bluestore_debug_prefragment_max / min_alloc_size; float r = cct->_conf->bluestore_debug_prefill; r /= 1.0 - r; @@ -4382,16 +4383,16 @@ int BlueStore::_open_fm(bool create) uint64_t l = (rand() % max_b + 1) * min_alloc_size; if (start + l > end) { l = end - start; - l = P2ALIGN(l, min_alloc_size); + l = p2align(l, min_alloc_size); } assert(start + l <= end); uint64_t u = 1 + (uint64_t)(r * (double)l); - u = P2ROUNDUP(u, min_alloc_size); + u = p2roundup(u, min_alloc_size); if (start + l + u > end) { u = end - (start + l); // trim to align so we don't overflow again - u = P2ALIGN(u, min_alloc_size); + u = p2align(u, min_alloc_size); stop = true; } assert(start + l + u <= end); @@ -4735,7 +4736,7 @@ int BlueStore::_open_db(bool create, bool to_repair_db) uint64_t initial = bdev->get_size() * (cct->_conf->bluestore_bluefs_min_ratio + cct->_conf->bluestore_bluefs_gift_ratio); - initial = MAX(initial, cct->_conf->bluestore_bluefs_min); + initial = std::max(initial, cct->_conf->bluestore_bluefs_min); if (cct->_conf->bluefs_alloc_size % min_alloc_size) { derr << __func__ << " bluefs_alloc_size 0x" << std::hex << cct->_conf->bluefs_alloc_size << " is not a multiple of " @@ -4744,9 +4745,9 @@ int BlueStore::_open_db(bool create, bool to_repair_db) goto free_bluefs; } // align to bluefs's alloc_size - initial = P2ROUNDUP(initial, cct->_conf->bluefs_alloc_size); + initial = p2roundup(initial, cct->_conf->bluefs_alloc_size); // put bluefs in the middle of the device in case it is an HDD - uint64_t start = P2ALIGN((bdev->get_size() - initial) / 2, + uint64_t start = p2align((bdev->get_size() - initial) / 2, cct->_conf->bluefs_alloc_size); bluefs->add_block_extent(bluefs_shared_bdev, start, initial); bluefs_extents.insert(start, initial); @@ -5068,10 +5069,10 @@ int BlueStore::_balance_bluefs_freespace(PExtentVector *extents) if (gift) { // round up to alloc size - gift = P2ROUNDUP(gift, cct->_conf->bluefs_alloc_size); + gift = p2roundup(gift, cct->_conf->bluefs_alloc_size); // hard cap to fit into 32 bits - gift = MIN(gift, 1ull<<31); + gift = std::min<uint64_t>(gift, 1ull << 31); dout(10) << __func__ << " gifting " << gift << " (" << pretty_si_t(gift) << ")" << dendl; @@ -5101,10 +5102,10 @@ int BlueStore::_balance_bluefs_freespace(PExtentVector *extents) // reclaim from bluefs? 
if (reclaim) { // round up to alloc size - reclaim = P2ROUNDUP(reclaim, cct->_conf->bluefs_alloc_size); + reclaim = p2roundup(reclaim, cct->_conf->bluefs_alloc_size); // hard cap to fit into 32 bits - reclaim = MIN(reclaim, 1ull<<31); + reclaim = std::min<uint64_t>(reclaim, 1ull << 31); dout(10) << __func__ << " reclaiming " << reclaim << " (" << pretty_si_t(reclaim) << ")" << dendl; @@ -5409,7 +5410,7 @@ int BlueStore::mkfs() } // make sure min_alloc_size is power of 2 aligned. - if (!ISP2(min_alloc_size)) { + if (!isp2(min_alloc_size)) { derr << __func__ << " min_alloc_size 0x" << std::hex << min_alloc_size << std::dec << " is not power of 2 aligned!" @@ -5666,7 +5667,7 @@ static void apply(uint64_t off, BlueStore::mempool_dynamic_bitset &bitset, std::function f) { - auto end = ROUND_UP_TO(off + len, granularity); + auto end = round_up_to(off + len, granularity); while (off < end) { uint64_t pos = off / granularity; f(pos, bitset); @@ -5802,7 +5803,7 @@ int BlueStore::_fsck(bool deep, bool repair) used_blocks.resize(fm->get_alloc_units()); apply( - 0, MAX(min_alloc_size, SUPER_RESERVED), fm->get_alloc_size(), used_blocks, + 0, std::max<uint64_t>(min_alloc_size, SUPER_RESERVED), fm->get_alloc_size(), used_blocks, [&](uint64_t pos, mempool_dynamic_bitset &bs) { assert(pos < bs.size()); bs.set(pos); @@ -6016,7 +6017,7 @@ int BlueStore::_fsck(bool deep, bool repair) uint64_t chunk_size = blob_len / (sizeof(*pu)*8); uint64_t start = l.blob_offset / chunk_size; uint64_t end = - ROUND_UP_TO(l.blob_offset + l.length, chunk_size) / chunk_size; + round_up_to(l.blob_offset + l.length, chunk_size) / chunk_size; for (auto i = start; i < end; ++i) { (*pu) |= (1u << i); } @@ -6273,8 +6274,8 @@ int BlueStore::_fsck(bool deep, bool repair) length == min_alloc_size - SUPER_RESERVED) { // this is due to the change just after luminous to min_alloc_size // granularity allocations, and our baked in assumption at the top - // of _fsck that 0~ROUND_UP_TO(SUPER_RESERVED,min_alloc_size) is used - // (vs luminous's ROUND_UP_TO(SUPER_RESERVED,block_size)). harmless, + // of _fsck that 0~round_up_to(SUPER_RESERVED,min_alloc_size) is used + // (vs luminous's round_up_to(SUPER_RESERVED,block_size)). harmless, // since we will never allocate this region below min_alloc_size. dout(10) << __func__ << " ignoring free extent between SUPER_RESERVED" << " and min_alloc_size, 0x" << std::hex << offset << "~" @@ -7068,7 +7069,7 @@ int BlueStore::_fiemap( uint64_t x_len = length; if (ep != eend && ep->logical_offset <= offset) { uint64_t x_off = offset - ep->logical_offset; - x_len = MIN(x_len, ep->length - x_off); + x_len = std::min(x_len, ep->length - x_off); dout(30) << __func__ << " lextent 0x" << std::hex << offset << "~" << x_len << std::dec << " blob " << ep->blob << dendl; destset.insert(offset, x_len); @@ -9664,7 +9665,7 @@ void BlueStore::_pad_zeros( size_t back_pad = 0; size_t pad_count = 0; if (front_pad) { - size_t front_copy = MIN(chunk_size - front_pad, length); + size_t front_copy = std::min(chunk_size - front_pad, length); bufferptr z = buffer::create_page_aligned(chunk_size); z.zero(0, front_pad, false); pad_count += front_pad; @@ -9747,10 +9748,10 @@ void BlueStore::_do_write_small( prev_ep = end; // to avoid this extent check as it's a duplicate } - auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size); + auto max_bsize = std::max(wctx->target_blob_size, min_alloc_size); auto min_off = offset >= max_bsize ? 
offset - max_bsize : 0; uint32_t alloc_len = min_alloc_size; - auto offset0 = P2ALIGN(offset, alloc_len); + auto offset0 = p2align(offset, alloc_len); bool any_change; @@ -9778,8 +9779,8 @@ void BlueStore::_do_write_small( uint64_t chunk_size = b->get_blob().get_chunk_size(block_size); // can we pad our head/tail out with zeros? uint64_t head_pad, tail_pad; - head_pad = P2PHASE(offset, chunk_size); - tail_pad = P2NPHASE(end_offs, chunk_size); + head_pad = p2phase(offset, chunk_size); + tail_pad = p2nphase(end_offs, chunk_size); if (head_pad || tail_pad) { o->extent_map.fault_range(db, offset - head_pad, end_offs - offset + head_pad + tail_pad); @@ -9843,8 +9844,8 @@ void BlueStore::_do_write_small( return; } // read some data to fill out the chunk? - uint64_t head_read = P2PHASE(b_off, chunk_size); - uint64_t tail_read = P2NPHASE(b_off + b_len, chunk_size); + uint64_t head_read = p2phase(b_off, chunk_size); + uint64_t tail_read = p2nphase(b_off + b_len, chunk_size); if ((head_read || tail_read) && (b->get_blob().get_ondisk_length() >= b_off + b_len + tail_read) && head_read + tail_read < min_alloc_size) { @@ -10013,7 +10014,7 @@ void BlueStore::_do_write_small( // new blob. BlobRef b = c->new_blob(); - uint64_t b_off = P2PHASE(offset, alloc_len); + uint64_t b_off = p2phase(offset, alloc_len); uint64_t b_off0 = b_off; _pad_zeros(&bl, &b_off0, block_size); o->extent_map.punch_hole(c, offset, length, &wctx->old_extents); @@ -10038,10 +10039,10 @@ void BlueStore::_do_write_big( logger->inc(l_bluestore_write_big); logger->inc(l_bluestore_write_big_bytes, length); o->extent_map.punch_hole(c, offset, length, &wctx->old_extents); - auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size); + auto max_bsize = std::max(wctx->target_blob_size, min_alloc_size); while (length > 0) { bool new_blob = false; - uint32_t l = MIN(max_bsize, length); + uint32_t l = std::min(max_bsize, length); BlobRef b; uint32_t b_off = 0; @@ -10173,7 +10174,7 @@ int BlueStore::_do_alloc_write( // compress (as needed) and calc needed space uint64_t need = 0; - auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size); + auto max_bsize = std::max(wctx->target_blob_size, min_alloc_size); for (auto& wi : wctx->writes) { if (c && wi.blob_length > min_alloc_size) { utime_t start = ceph_clock_now(); @@ -10194,9 +10195,9 @@ int BlueStore::_do_alloc_write( wi.compressed_bl.claim_append(t); wi.compressed_len = wi.compressed_bl.length(); - uint64_t newlen = P2ROUNDUP(wi.compressed_len, min_alloc_size); + uint64_t newlen = p2roundup(wi.compressed_len, min_alloc_size); uint64_t want_len_raw = wi.blob_length * crr; - uint64_t want_len = P2ROUNDUP(want_len_raw, min_alloc_size); + uint64_t want_len = p2roundup(want_len_raw, min_alloc_size); if (newlen <= want_len && newlen < wi.blob_length) { // Cool. We compressed at least as much as we were hoping to. 
// pad out to min_alloc_size @@ -10315,7 +10316,7 @@ int BlueStore::_do_alloc_write( for (auto& p : extents) { txc->allocated.insert(p.offset, p.length); } - dblob.allocated(P2ALIGN(b_off, min_alloc_size), final_length, extents); + dblob.allocated(p2align(b_off, min_alloc_size), final_length, extents); dout(20) << __func__ << " blob " << *b << dendl; if (dblob.has_csum()) { @@ -10457,10 +10458,10 @@ void BlueStore::_do_write_data( uint64_t tail_offset, tail_length; head_offset = offset; - head_length = P2NPHASE(offset, min_alloc_size); + head_length = p2nphase(offset, min_alloc_size); - tail_offset = P2ALIGN(end, min_alloc_size); - tail_length = P2PHASE(end, min_alloc_size); + tail_offset = p2align(end, min_alloc_size); + tail_length = p2phase(end, min_alloc_size); middle_offset = head_offset + head_length; middle_length = length - head_length - tail_length; @@ -11638,10 +11639,12 @@ void BlueStore::DBHistogram::update_hist_entry(map(key_size, key_hist[prefix][key_slab].max_len); key_hist[prefix][key_slab].val_map[value_slab].count++; key_hist[prefix][key_slab].val_map[value_slab].max_len = - MAX(value_size, key_hist[prefix][key_slab].val_map[value_slab].max_len); + std::max(value_size, + key_hist[prefix][key_slab].val_map[value_slab].max_len); } void BlueStore::DBHistogram::dump(Formatter *f) @@ -11700,8 +11703,8 @@ void BlueStore::generate_db_histogram(Formatter *f) key_size = iter->key_size(); value_size = iter->value_size(); hist.value_hist[hist.get_value_slab(value_size)]++; - max_key_size = MAX(max_key_size, key_size); - max_value_size = MAX(max_value_size, value_size); + max_key_size = std::max(max_key_size, key_size); + max_value_size = std::max(max_value_size, value_size); total_key_size += key_size; total_value_size += value_size; diff --git a/src/os/bluestore/StupidAllocator.cc b/src/os/bluestore/StupidAllocator.cc index ab2e562be3b3c..0dd52a87c62b7 100644 --- a/src/os/bluestore/StupidAllocator.cc +++ b/src/os/bluestore/StupidAllocator.cc @@ -159,7 +159,7 @@ int64_t StupidAllocator::allocate_int( if (skew) skew = alloc_unit - skew; *offset = p.get_start() + skew; - *length = std::min(std::max(alloc_unit, want_size), P2ALIGN((p.get_len() - skew), alloc_unit)); + *length = std::min(std::max(alloc_unit, want_size), p2align((p.get_len() - skew), alloc_unit)); if (cct->_conf->bluestore_debug_small_allocations) { uint64_t max = alloc_unit * (rand() % cct->_conf->bluestore_debug_small_allocations); diff --git a/src/os/bluestore/bluestore_types.cc b/src/os/bluestore/bluestore_types.cc index c176ab743da6a..09a7677c1e74b 100644 --- a/src/os/bluestore/bluestore_types.cc +++ b/src/os/bluestore/bluestore_types.cc @@ -165,7 +165,7 @@ void bluestore_extent_ref_map_t::get(uint64_t offset, uint32_t length) } if (p->first > offset) { // gap - uint64_t newlen = MIN(p->first - offset, length); + uint64_t newlen = std::min(p->first - offset, length); p = ref_map.insert( map::value_type(offset, record_t(newlen, 1))).first; @@ -386,7 +386,7 @@ void bluestore_blob_use_tracker_t::init( assert(_au_size > 0); assert(full_length > 0); clear(); - uint32_t _num_au = ROUND_UP_TO(full_length, _au_size) / _au_size; + uint32_t _num_au = round_up_to(full_length, _au_size) / _au_size; au_size = _au_size; if( _num_au > 1 ) { num_au = _num_au; @@ -406,7 +406,7 @@ void bluestore_blob_use_tracker_t::get( while (offset < end) { auto phase = offset % au_size; bytes_per_au[offset / au_size] += - MIN(au_size - phase, end - offset); + std::min(au_size - phase, end - offset); offset += (phase ? 
au_size - phase : au_size); } } @@ -430,7 +430,7 @@ bool bluestore_blob_use_tracker_t::put( while (offset < end) { auto phase = offset % au_size; size_t pos = offset / au_size; - auto diff = MIN(au_size - phase, end - offset); + auto diff = std::min(au_size - phase, end - offset); assert(diff <= bytes_per_au[pos]); bytes_per_au[pos] -= diff; offset += (phase ? au_size - phase : au_size); @@ -935,7 +935,7 @@ bool bluestore_blob_t::release_extents(bool all, uint32_t to_release = loffs_it->length; do { uint32_t to_release_part = - MIN(pext_it->length - delta0 - delta, to_release); + std::min(pext_it->length - delta0 - delta, to_release); auto o = pext_it->offset + delta0 + delta; if (last_r != r->end() && last_r->offset + last_r->length == o) { last_r->length += to_release_part; diff --git a/src/os/bluestore/bluestore_types.h b/src/os/bluestore/bluestore_types.h index f5e95ad9a3eb7..604360c03c4ab 100644 --- a/src/os/bluestore/bluestore_types.h +++ b/src/os/bluestore/bluestore_types.h @@ -353,7 +353,7 @@ struct bluestore_blob_use_tracker_t { } void prune_tail(uint32_t new_len) { if (num_au) { - new_len = ROUND_UP_TO(new_len, au_size); + new_len = round_up_to(new_len, au_size); uint32_t _num_au = new_len / au_size; assert(_num_au <= num_au); if (_num_au) { @@ -378,7 +378,7 @@ struct bluestore_blob_use_tracker_t { bytes_per_au[0] = old_total; } else { assert(_au_size == au_size); - new_len = ROUND_UP_TO(new_len, au_size); + new_len = round_up_to(new_len, au_size); uint32_t _num_au = new_len / au_size; assert(_num_au >= num_au); if (_num_au > num_au) { @@ -617,7 +617,7 @@ public: /// return chunk (i.e. min readable block) size for the blob uint64_t get_chunk_size(uint64_t dev_block_size) const { return has_csum() ? - MAX(dev_block_size, get_csum_chunk_size()) : dev_block_size; + std::max(dev_block_size, get_csum_chunk_size()) : dev_block_size; } uint32_t get_csum_chunk_size() const { return 1 << csum_chunk_order; @@ -688,7 +688,7 @@ public: assert(offset + length <= blob_len); uint64_t chunk_size = blob_len / (sizeof(unused)*8); uint64_t start = offset / chunk_size; - uint64_t end = ROUND_UP_TO(offset + length, chunk_size) / chunk_size; + uint64_t end = round_up_to(offset + length, chunk_size) / chunk_size; auto i = start; while (i < end && (unused & (1u << i))) { i++; @@ -702,7 +702,7 @@ public: assert((blob_len % (sizeof(unused)*8)) == 0); assert(offset + length <= blob_len); uint64_t chunk_size = blob_len / (sizeof(unused)*8); - uint64_t start = ROUND_UP_TO(offset, chunk_size) / chunk_size; + uint64_t start = round_up_to(offset, chunk_size) / chunk_size; uint64_t end = (offset + length) / chunk_size; for (auto i = start; i < end; ++i) { unused |= (1u << i); @@ -720,7 +720,7 @@ public: assert(offset + length <= blob_len); uint64_t chunk_size = blob_len / (sizeof(unused)*8); uint64_t start = offset / chunk_size; - uint64_t end = ROUND_UP_TO(offset + length, chunk_size) / chunk_size; + uint64_t end = round_up_to(offset + length, chunk_size) / chunk_size; for (auto i = start; i < end; ++i) { unused &= ~(1u << i); } @@ -741,7 +741,7 @@ public: } while (x_len > 0) { assert(p != extents.end()); - uint64_t l = MIN(p->length - x_off, x_len); + uint64_t l = std::min(p->length - x_off, x_len); int r = f(p->offset + x_off, l); if (r < 0) return r; @@ -765,7 +765,7 @@ public: uint64_t x_len = bl.length(); while (x_len > 0) { assert(p != extents.end()); - uint64_t l = MIN(p->length - x_off, x_len); + uint64_t l = std::min(p->length - x_off, x_len); bufferlist t; it.copy(l, t); f(p->offset + x_off, t); diff 
--git a/src/os/filestore/FileJournal.cc b/src/os/filestore/FileJournal.cc index ca5775310cc5e..e52ed031d0874 100644 --- a/src/os/filestore/FileJournal.cc +++ b/src/os/filestore/FileJournal.cc @@ -1570,7 +1570,7 @@ int FileJournal::prepare_entry(vector& tls, bufferlist memset(&h, 0, sizeof(h)); if (data_align >= 0) h.pre_pad = ((unsigned int)data_align - (unsigned int)head_size) & ~CEPH_PAGE_MASK; - off64_t size = ROUND_UP_TO(base_size + h.pre_pad, header.alignment); + off64_t size = round_up_to(base_size + h.pre_pad, header.alignment); unsigned post_pad = size - base_size - h.pre_pad; h.len = bl.length(); h.post_pad = post_pad; @@ -1745,10 +1745,10 @@ void FileJournal::do_discard(int64_t offset, int64_t end) { dout(10) << __func__ << "trim(" << offset << ", " << end << dendl; - offset = ROUND_UP_TO(offset, block_size); + offset = round_up_to(offset, block_size); if (offset >= end) return; - end = ROUND_UP_TO(end - block_size, block_size); + end = round_up_to(end - block_size, block_size); assert(end >= offset); if (offset < end) if (block_device_discard(fd, offset, end - offset) < 0) diff --git a/src/os/filestore/FileJournal.h b/src/os/filestore/FileJournal.h index f52510d2868cb..7283d0be71d94 100644 --- a/src/os/filestore/FileJournal.h +++ b/src/os/filestore/FileJournal.h @@ -389,7 +389,7 @@ private: } write_finish_thread; off64_t get_top() const { - return ROUND_UP_TO(sizeof(header), block_size); + return round_up_to(sizeof(header), block_size); } ZTracer::Endpoint trace_endpoint; diff --git a/src/os/kstore/KStore.cc b/src/os/kstore/KStore.cc index 9beedbaa70f11..a1cd535bad25c 100644 --- a/src/os/kstore/KStore.cc +++ b/src/os/kstore/KStore.cc @@ -1235,7 +1235,7 @@ int KStore::_do_read( _do_read_stripe(o, offset - stripe_off, &stripe); dout(30) << __func__ << " stripe " << offset - stripe_off << " got " << stripe.length() << dendl; - unsigned swant = MIN(stripe_size - stripe_off, length); + unsigned swant = std::min(stripe_size - stripe_off, length); if (stripe.length()) { if (swant == stripe.length()) { bl.claim_append(stripe); @@ -1243,7 +1243,7 @@ int KStore::_do_read( } else { unsigned l = 0; if (stripe_off < stripe.length()) { - l = MIN(stripe.length() - stripe_off, swant); + l = std::min(stripe.length() - stripe_off, swant); bufferlist t; t.substr_of(stripe, stripe_off, l); bl.claim_append(t); @@ -2656,7 +2656,7 @@ int KStore::_do_write(TransContext *txc, << ", got " << prev.length() << dendl; bufferlist bl; if (offset_rem) { - unsigned p = MIN(prev.length(), offset_rem); + unsigned p = std::min(prev.length(), offset_rem); if (p) { dout(20) << __func__ << " reuse leading " << p << " bytes" << dendl; bl.substr_of(prev, 0, p); @@ -2746,7 +2746,7 @@ int KStore::_zero(TransContext *txc, dout(30) << __func__ << " stripe " << pos - stripe_off << " got " << stripe.length() << dendl; bufferlist bl; - bl.substr_of(stripe, 0, MIN(stripe.length(), stripe_off)); + bl.substr_of(stripe, 0, std::min(stripe.length(), stripe_off)); if (end >= pos - stripe_off + stripe_size || end >= o->onode.size) { dout(20) << __func__ << " truncated stripe " << pos - stripe_off @@ -2803,7 +2803,7 @@ int KStore::_do_truncate(TransContext *txc, OnodeRef o, uint64_t offset) dout(30) << __func__ << " stripe " << pos - stripe_off << " got " << stripe.length() << dendl; bufferlist t; - t.substr_of(stripe, 0, MIN(stripe_off, stripe.length())); + t.substr_of(stripe, 0, std::min(stripe_off, stripe.length())); _do_write_stripe(txc, o, pos - stripe_off, t); dout(20) << __func__ << " truncated stripe " << pos - stripe_off << 
" to " << t.length() << dendl; diff --git a/src/os/memstore/MemStore.cc b/src/os/memstore/MemStore.cc index c4adda61ef1e8..591788c16a218 100644 --- a/src/os/memstore/MemStore.cc +++ b/src/os/memstore/MemStore.cc @@ -226,7 +226,7 @@ int MemStore::statfs(struct store_statfs_t *st) dout(10) << __func__ << dendl; st->reset(); st->total = cct->_conf->memstore_device_bytes; - st->available = MAX(int64_t(st->total) - int64_t(used_bytes), 0ll); + st->available = std::max(st->total - used_bytes, 0); dout(10) << __func__ << ": used_bytes: " << used_bytes << "/" << cct->_conf->memstore_device_bytes << dendl; return 0; @@ -1401,7 +1401,7 @@ int MemStore::_collection_add(const coll_t& cid, const coll_t& ocid, const ghobj if (!oc) return -ENOENT; RWLock::WLocker l1(std::min(&(*c), &(*oc))->lock); - RWLock::WLocker l2(MAX(&(*c), &(*oc))->lock); + RWLock::WLocker l2(std::max(&(*c), &(*oc))->lock); if (c->object_hash.count(oid)) return -EEXIST; @@ -1460,7 +1460,7 @@ int MemStore::_split_collection(const coll_t& cid, uint32_t bits, uint32_t match if (!dc) return -ENOENT; RWLock::WLocker l1(std::min(&(*sc), &(*dc))->lock); - RWLock::WLocker l2(MAX(&(*sc), &(*dc))->lock); + RWLock::WLocker l2(std::max(&(*sc), &(*dc))->lock); map::iterator p = sc->object_map.begin(); while (p != sc->object_map.end()) { @@ -1727,14 +1727,14 @@ int MemStore::PageSetObject::clone(Object *src, uint64_t srcoff, auto dst_iter = dst_pages.begin(); for (auto &src_page : tls_pages) { - auto sbegin = MAX(srcoff, src_page->offset); + auto sbegin = std::max(srcoff, src_page->offset); auto send = std::min(srcoff + count, src_page->offset + src_page_size); // zero-fill holes before src_page if (srcoff < sbegin) { while (dst_iter != dst_pages.end()) { auto &dst_page = *dst_iter; - auto dbegin = MAX(srcoff + delta, dst_page->offset); + auto dbegin = std::max(srcoff + delta, dst_page->offset); auto dend = std::min(sbegin + delta, dst_page->offset + dst_page_size); std::fill(dst_page->data + dbegin - dst_page->offset, dst_page->data + dend - dst_page->offset, 0); @@ -1750,7 +1750,7 @@ int MemStore::PageSetObject::clone(Object *src, uint64_t srcoff, // copy data from src page to dst pages while (dst_iter != dst_pages.end()) { auto &dst_page = *dst_iter; - auto dbegin = MAX(sbegin + delta, dst_page->offset); + auto dbegin = std::max(sbegin + delta, dst_page->offset); auto dend = std::min(send + delta, dst_page->offset + dst_page_size); std::copy(src_page->data + (dbegin - delta) - src_page->offset, @@ -1773,7 +1773,7 @@ int MemStore::PageSetObject::clone(Object *src, uint64_t srcoff, if (count > 0) { while (dst_iter != dst_pages.end()) { auto &dst_page = *dst_iter; - auto dbegin = MAX(dstoff, dst_page->offset); + auto dbegin = std::max(dstoff, dst_page->offset); auto dend = std::min(dstoff + count, dst_page->offset + dst_page_size); std::fill(dst_page->data + dbegin - dst_page->offset, dst_page->data + dend - dst_page->offset, 0); diff --git a/src/osd/ECBackend.h b/src/osd/ECBackend.h index a854ef99ffae4..813924a9e064e 100644 --- a/src/osd/ECBackend.h +++ b/src/osd/ECBackend.h @@ -211,7 +211,7 @@ public: private: friend struct ECRecoveryHandle; uint64_t get_recovery_chunk_size() const { - return ROUND_UP_TO(cct->_conf->osd_recovery_max_chunk, + return round_up_to(cct->_conf->osd_recovery_max_chunk, sinfo.get_stripe_width()); } diff --git a/src/osd/OSD.cc b/src/osd/OSD.cc index 07cc701eb7fdd..bf613d58d2602 100644 --- a/src/osd/OSD.cc +++ b/src/osd/OSD.cc @@ -708,14 +708,14 @@ void OSDService::promote_throttle_recalibrate() if (attempts && dur > 0) 
{ uint64_t avg_size = 1; if (obj) - avg_size = MAX(bytes / obj, 1); + avg_size = std::max<uint64_t>(bytes / obj, 1); unsigned po = (double)target_obj_sec * dur * 1000.0 / (double)attempts; unsigned pb = (double)target_bytes_sec / (double)avg_size * dur * 1000.0 / (double)attempts; dout(20) << __func__ << " po " << po << " pb " << pb << " avg_size " << avg_size << dendl; if (target_obj_sec && target_bytes_sec) - new_prob = MIN(po, pb); + new_prob = std::min(po, pb); else if (target_obj_sec) new_prob = po; else if (target_bytes_sec) @@ -735,13 +735,13 @@ void OSDService::promote_throttle_recalibrate() ratio = (double)actual / (double)prob; new_prob = (double)new_prob / ratio; } - new_prob = MAX(new_prob, min_prob); - new_prob = MIN(new_prob, 1000); + new_prob = std::max(new_prob, min_prob); + new_prob = std::min(new_prob, 1000u); // adjust prob = (prob + new_prob) / 2; - prob = MAX(prob, min_prob); - prob = MIN(prob, 1000); + prob = std::max(prob, min_prob); + prob = std::min(prob, 1000u); dout(10) << __func__ << " actual " << actual << ", actual/prob ratio " << ratio << ", adjusted new_prob " << new_prob @@ -1169,7 +1169,7 @@ bool OSDService::should_share_map(entity_name_t name, Connection *con, (osdmap->get_cluster_addr(name.num()) == con->get_peer_addr() || osdmap->get_hb_back_addr(name.num()) == con->get_peer_addr())) { // remember - epoch_t has = MAX(get_peer_epoch(name.num()), epoch); + epoch_t has = std::max(get_peer_epoch(name.num()), epoch); // share? if (has < osdmap->get_epoch()) { @@ -3551,9 +3551,9 @@ int OSD::update_crush_location() return r; } snprintf(weight, sizeof(weight), "%.4lf", - MAX((double).00001, - (double)(st.total) / - (double)(1ull << 40 /* TB */))); + std::max(.00001, + double(st.total) / + double(1ull << 40 /* TB */))); std::multimap loc = cct->crush_location.get_location(); @@ -4878,7 +4878,7 @@ void OSD::heartbeat_check() << " front " << p->second.last_rx_front << " (cutoff " << cutoff << ")" << dendl; // fail - failure_queue[p->first] = MIN(p->second.last_rx_back, p->second.last_rx_front); + failure_queue[p->first] = std::min(p->second.last_rx_back, p->second.last_rx_front); } } } @@ -5026,7 +5026,7 @@ void OSD::tick_without_osd_lock() dout(10) << "tick_without_osd_lock" << dendl; logger->set(l_osd_buf, buffer::get_total_alloc()); - logger->set(l_osd_history_alloc_bytes, SHIFT_ROUND_UP(buffer::get_history_alloc_bytes(), 20)); + logger->set(l_osd_history_alloc_bytes, shift_round_up(buffer::get_history_alloc_bytes(), 20)); logger->set(l_osd_history_alloc_num, buffer::get_history_alloc_num()); logger->set(l_osd_cached_crc, buffer::get_cached_crc()); logger->set(l_osd_cached_crc_adjusted, buffer::get_cached_crc_adjusted()); @@ -7255,7 +7255,7 @@ void OSD::osdmap_subscribe(version_t epoch, bool force_request) if (latest_subscribed_epoch >= epoch && !force_request) return; - latest_subscribed_epoch = MAX(epoch, latest_subscribed_epoch); + latest_subscribed_epoch = std::max(epoch, latest_subscribed_epoch); if (monc->sub_want_increment("osdmap", epoch, CEPH_SUBSCRIBE_ONETIME) || force_request) { @@ -7415,7 +7415,7 @@ void OSD::handle_osd_map(MOSDMap *m) uint64_t txn_size = 0; // store new maps: queue for disk and put in the osdmap cache - epoch_t start = MAX(superblock.newest_map + 1, first); + epoch_t start = std::max(superblock.newest_map + 1, first); for (epoch_t e = start; e <= last; e++) { if (txn_size >= t.get_num_bytes()) { derr << __func__ << " transaction size overflowed" << dendl; @@ -9008,7 +9008,7 @@ void OSDService::_maybe_queue_recovery() { uint64_t 
available_pushes; while (!awaiting_throttle.empty() && _recover_now(&available_pushes)) { - uint64_t to_start = MIN( + uint64_t to_start = std::min( available_pushes, cct->_conf->osd_recovery_max_single_start); _queue_for_recovery(awaiting_throttle.front(), to_start); diff --git a/src/osd/PG.cc b/src/osd/PG.cc index 4beaf7fb6d1a5..425ba6377cd64 100644 --- a/src/osd/PG.cc +++ b/src/osd/PG.cc @@ -2683,9 +2683,9 @@ void PG::_update_calc_stats() // computed using target and eventual used to get degraded total. unsigned target = get_osdmap()->get_pg_size(info.pgid.pgid); - unsigned nrep = MAX(actingset.size(), upset.size()); + unsigned nrep = std::max(actingset.size(), upset.size()); // calc num_object_copies - info.stats.stats.calc_copies(MAX(target, nrep)); + info.stats.stats.calc_copies(std::max(target, nrep)); info.stats.stats.sum.num_objects_degraded = 0; info.stats.stats.sum.num_objects_unfound = 0; info.stats.stats.sum.num_objects_misplaced = 0; @@ -2753,7 +2753,7 @@ void PG::_update_calc_stats() osd_missing = peer_missing[p].num_missing(); } - osd_objects = MAX(0, num_objects - osd_missing); + osd_objects = std::max(0, num_objects - osd_missing); object_copies += osd_objects; // Count non-missing objects not in up as misplaced if (!in_up) { @@ -2762,8 +2762,8 @@ void PG::_update_calc_stats() } } else { // If this peer has more objects then it should, ignore them - int64_t osd_backfilled = MIN(num_objects, - peer_info[p].stats.stats.sum.num_objects); + int64_t osd_backfilled = std::min(num_objects, + peer_info[p].stats.stats.sum.num_objects); backfill_target_objects.insert(make_pair(osd_backfilled, p)); backfilled += osd_backfilled; } @@ -2792,11 +2792,11 @@ void PG::_update_calc_stats() ++i, --num_misplaced) { adjust_misplaced += i->first; } - misplaced = MAX(0, misplaced - adjust_misplaced); + misplaced = std::max(0, misplaced - adjust_misplaced); // a degraded objects has fewer replicas or EC shards than the // pool specifies. num_object_copies will never be smaller than target * num_objects. - int64_t degraded = MAX(0, info.stats.stats.sum.num_object_copies - object_copies); + int64_t degraded = std::max(0, info.stats.stats.sum.num_object_copies - object_copies); info.stats.stats.sum.num_objects_degraded = degraded; info.stats.stats.sum.num_objects_unfound = get_num_unfound(); @@ -2808,7 +2808,7 @@ void PG::_update_blocked_by() { // set a max on the number of blocking peers we report. if we go // over, report a random subset. keep the result sorted. - unsigned keep = MIN(blocked_by.size(), cct->_conf->osd_max_pg_blocked_by); + unsigned keep = std::min(blocked_by.size(), cct->_conf->osd_max_pg_blocked_by); unsigned skip = blocked_by.size() - keep; info.stats.blocked_by.clear(); info.stats.blocked_by.resize(keep); @@ -4575,7 +4575,7 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle) * left end of the range if we are a tier because they may legitimately * not exist (see _scrub). 
*/ - int min = MAX(3, cct->_conf->osd_scrub_chunk_min); + int min = std::max<int64_t>(3, cct->_conf->osd_scrub_chunk_min); hobject_t start = scrubber.start; hobject_t candidate_end; vector<hobject_t> objects; @@ -4583,7 +4583,7 @@ void PG::chunky_scrub(ThreadPool::TPHandle &handle) ret = get_pgbackend()->objects_list_partial( start, min, - MAX(min, cct->_conf->osd_scrub_chunk_max), + std::max<int64_t>(min, cct->_conf->osd_scrub_chunk_max), &objects, &candidate_end); assert(ret >= 0); diff --git a/src/osd/PGLog.cc b/src/osd/PGLog.cc index 412e923e936b8..b340ebf219dbc 100644 --- a/src/osd/PGLog.cc +++ b/src/osd/PGLog.cc @@ -230,12 +230,12 @@ void PGLog::proc_replica_log( * Furthermore, the event represented by a log tail was necessarily trimmed, * thus neither olog.tail nor log.tail can be divergent. It's * possible that olog/log contain no actual events between olog.head and - * MAX(log.tail, olog.tail), however, since they might have been split out. + * max(log.tail, olog.tail), however, since they might have been split out. * Thus, if we cannot find an event e such that * log.tail <= e.version <= log.head, the last_update must actually be - * MAX(log.tail, olog.tail). + * max(log.tail, olog.tail). */ - eversion_t limit = MAX(olog.tail, log.tail); + eversion_t limit = std::max(olog.tail, log.tail); eversion_t lu = (first_non_divergent == log.log.rend() || first_non_divergent->version < limit) ? @@ -390,14 +390,14 @@ void PGLog::merge_log(pg_info_t &oinfo, pg_log_t &olog, pg_shard_t fromosd, // find start point in olog list<pg_log_entry_t>::iterator to = olog.log.end(); list<pg_log_entry_t>::iterator from = olog.log.end(); - eversion_t lower_bound = MAX(olog.tail, orig_tail); + eversion_t lower_bound = std::max(olog.tail, orig_tail); while (1) { if (from == olog.log.begin()) break; --from; dout(20) << " ? " << *from << dendl; if (from->version <= log.head) { - lower_bound = MAX(lower_bound, from->version); + lower_bound = std::max(lower_bound, from->version); ++from; break; } diff --git a/src/osd/PrimaryLogPG.cc b/src/osd/PrimaryLogPG.cc index a1f2c054537e5..7310c8699c940 100644 --- a/src/osd/PrimaryLogPG.cc +++ b/src/osd/PrimaryLogPG.cc @@ -1175,9 +1175,11 @@ void PrimaryLogPG::do_pg_op(OpRequestRef op) << " != " << info.pgid << dendl; result = 0; // hmm? } else { - unsigned list_size = MIN(cct->_conf->osd_max_pgls, p->op.pgls.count); + unsigned list_size = std::min(cct->_conf->osd_max_pgls, + p->op.pgls.count); - dout(10) << " pgnls pg=" << m->get_pg() << " count " << list_size << dendl; + dout(10) << " pgnls pg=" << m->get_pg() << " count " << list_size + << dendl; // read into a buffer vector<hobject_t> sentries; pg_nls_response_t response; @@ -1345,7 +1347,8 @@ void PrimaryLogPG::do_pg_op(OpRequestRef op) << " != " << info.pgid << dendl; result = 0; // hmm? } else { - unsigned list_size = MIN(cct->_conf->osd_max_pgls, p->op.pgls.count); + unsigned list_size = std::min(cct->_conf->osd_max_pgls, + p->op.pgls.count); dout(10) << " pgls pg=" << m->get_pg() << " count " << list_size << dendl; // read into a buffer @@ -1572,7 +1575,7 @@ void PrimaryLogPG::calc_trim_to() target = cct->_conf->osd_max_pg_log_entries; } - eversion_t limit = MIN( + eversion_t limit = std::min( min_last_complete_ondisk, pg_log.get_can_rollback_to()); size_t log_size = pg_log.get_log().log.size(); @@ -5293,7 +5296,7 @@ int PrimaryLogPG::do_read(OpContext *ctx, OSDOp& osd_op) { // XXX the op.extent.length is the requested length for async read // On error this length is changed to 0 after the error comes back.
- ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(op.extent.length, 10); + ctx->delta_stats.num_rd_kb += shift_round_up(op.extent.length, 10); ctx->delta_stats.num_rd++; return result; } @@ -5392,7 +5395,8 @@ int PrimaryLogPG::do_sparse_read(OpContext *ctx, OSDOp& osd_op) { // verify trailing hole? if (cct->_conf->osd_verify_sparse_read_holes) { - uint64_t end = MIN(op.extent.offset + op.extent.length, oi.size); + uint64_t end = std::min(op.extent.offset + op.extent.length, + oi.size); if (last < end) { bufferlist t; uint64_t len = end - last; @@ -5433,7 +5437,7 @@ int PrimaryLogPG::do_sparse_read(OpContext *ctx, OSDOp& osd_op) { << soid << dendl; } - ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(op.extent.length, 10); + ctx->delta_stats.num_rd_kb += shift_round_up(op.extent.length, 10); ctx->delta_stats.num_rd++; return 0; } @@ -5593,7 +5597,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops) if (r < 0) result = r; else - ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(bl.length(), 10); + ctx->delta_stats.num_rd_kb += shift_round_up(bl.length(), 10); ctx->delta_stats.num_rd++; dout(10) << " map_extents done on object " << soid << dendl; } @@ -5841,7 +5845,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops) if (r >= 0) { op.xattr.value_len = osd_op.outdata.length(); result = 0; - ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(osd_op.outdata.length(), 10); + ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10); } else result = r; @@ -5860,7 +5864,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops) bufferlist bl; encode(out, bl); - ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(bl.length(), 10); + ctx->delta_stats.num_rd_kb += shift_round_up(bl.length(), 10); ctx->delta_stats.num_rd++; osd_op.outdata.claim_append(bl); } @@ -5884,7 +5888,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops) break; ctx->delta_stats.num_rd++; - ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(xattr.length(), 10); + ctx->delta_stats.num_rd_kb += shift_round_up(xattr.length(), 10); switch (op.xattr.cmp_mode) { case CEPH_OSD_CMPXATTR_MODE_STRING: @@ -6692,8 +6696,9 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops) result = -EFBIG; break; } - unsigned max_name_len = MIN(osd->store->get_max_attr_name_length(), - cct->_conf->osd_max_attr_name_len); + unsigned max_name_len = + std::min(osd->store->get_max_attr_name_length(), + cct->_conf->osd_max_attr_name_len); if (op.xattr.name_len > max_name_len) { result = -ENAMETOOLONG; break; } @@ -6883,7 +6888,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops) encode(num, osd_op.outdata); osd_op.outdata.claim_append(bl); encode(truncated, osd_op.outdata); - ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(osd_op.outdata.length(), 10); + ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10); ctx->delta_stats.num_rd++; } break; @@ -6939,7 +6944,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops) encode(num, osd_op.outdata); osd_op.outdata.claim_append(bl); encode(truncated, osd_op.outdata); - ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(osd_op.outdata.length(), 10); + ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10); ctx->delta_stats.num_rd++; } break; @@ -6953,7 +6958,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops) ++ctx->num_read; { osd->store->omap_get_header(ch, ghobject_t(soid), &osd_op.outdata); - ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(osd_op.outdata.length(), 10); + ctx->delta_stats.num_rd_kb +=
shift_round_up(osd_op.outdata.length(), 10); ctx->delta_stats.num_rd++; } break; @@ -6976,7 +6981,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops) osd->store->omap_get_values(ch, ghobject_t(soid), keys_to_get, &out); } // else return empty omap entries encode(out, osd_op.outdata); - ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(osd_op.outdata.length(), 10); + ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10); ctx->delta_stats.num_rd++; } break; @@ -7089,7 +7094,7 @@ int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops) } t->omap_setkeys(soid, to_set_bl); ctx->delta_stats.num_wr++; - ctx->delta_stats.num_wr_kb += SHIFT_ROUND_UP(to_set_bl.length(), 10); + ctx->delta_stats.num_wr_kb += shift_round_up(to_set_bl.length(), 10); } obs.oi.set_flag(object_info_t::FLAG_OMAP); obs.oi.clear_omap_digest(); @@ -7753,7 +7758,7 @@ void PrimaryLogPG::write_update_size_and_usage(object_stat_sum_t& delta_stats, o } } delta_stats.num_wr++; - delta_stats.num_wr_kb += SHIFT_ROUND_UP(length, 10); + delta_stats.num_wr_kb += shift_round_up(length, 10); } void PrimaryLogPG::truncate_update_size_and_usage( @@ -7977,7 +7982,7 @@ void PrimaryLogPG::finish_ctx(OpContext *ctx, int log_op_type) // finish and log the op. if (ctx->user_modify) { // update the user_version for any modify ops, except for the watch op - ctx->user_at_version = MAX(info.last_user_version, ctx->new_obs.oi.user_version) + 1; + ctx->user_at_version = std::max(info.last_user_version, ctx->new_obs.oi.user_version) + 1; /* In order for new clients and old clients to interoperate properly * when exchanging versions, we need to lower bound the user_version * (which our new clients pay proper attention to) @@ -8271,7 +8276,7 @@ int PrimaryLogPG::do_copy_get(OpContext *ctx, bufferlist::iterator& bp, bufferlist& bl = reply_obj.data; if (left > 0 && !cursor.data_complete) { if (cursor.data_offset < oi.size) { - uint64_t max_read = MIN(oi.size - cursor.data_offset, (uint64_t)left); + uint64_t max_read = std::min(oi.size - cursor.data_offset, (uint64_t)left); if (cb) { async_read_started = true; ctx->pending_async_reads.push_back( @@ -9027,7 +9032,7 @@ void PrimaryLogPG::finish_copyfrom(CopyFromCallback *cb) ctx->delta_stats.num_bytes += obs.oi.size; } ctx->delta_stats.num_wr++; - ctx->delta_stats.num_wr_kb += SHIFT_ROUND_UP(obs.oi.size, 10); + ctx->delta_stats.num_wr_kb += shift_round_up(obs.oi.size, 10); osd->logger->inc(l_osd_copyfrom); } @@ -9557,7 +9562,7 @@ int PrimaryLogPG::start_flush( flush_ops[soid] = fop; info.stats.stats.sum.num_flush++; - info.stats.stats.sum.num_flush_kb += SHIFT_ROUND_UP(oi.size, 10); + info.stats.stats.sum.num_flush_kb += shift_round_up(oi.size, 10); return -EINPROGRESS; } @@ -12463,7 +12468,7 @@ bool PrimaryLogPG::all_peer_done() const * All objects in PG in [MIN,backfill_info.begin) have been backfilled to all * backfill_targets. There may be objects on backfill_target(s) yet to be deleted. * - * For a backfill target, all objects < MIN(peer_backfill_info[target].begin, + * For a backfill target, all objects < std::min(peer_backfill_info[target].begin, * backfill_info.begin) in PG are backfilled. No deleted objects in this * interval remain on the backfill target.
* @@ -12471,7 +12476,7 @@ bool PrimaryLogPG::all_peer_done() const * have been backfilled to target * * There *MAY* be missing/outdated objects between last_backfill_started and - * MIN(peer_backfill_info[*].begin, backfill_info.begin) in the event that client + * std::min(peer_backfill_info[*].begin, backfill_info.begin) in the event that client * io created objects since the last scan. For this reason, we call * update_range() again before continuing backfill. */ @@ -13792,7 +13797,7 @@ bool PrimaryLogPG::agent_maybe_evict(ObjectContextRef& obc, bool after_flush) if (obc->obs.oi.is_omap()) ctx->delta_stats.num_objects_omap--; ctx->delta_stats.num_evict++; - ctx->delta_stats.num_evict_kb += SHIFT_ROUND_UP(obc->obs.oi.size, 10); + ctx->delta_stats.num_evict_kb += shift_round_up(obc->obs.oi.size, 10); if (obc->obs.oi.is_dirty()) --ctx->delta_stats.num_objects_dirty; assert(r == 0); @@ -13913,20 +13918,20 @@ bool PrimaryLogPG::agent_choose_mode(bool restart, OpRequestRef op) uint64_t avg_size = num_user_bytes / num_user_objects; dirty_micro = num_dirty * avg_size * 1000000 / - MAX(pool.info.target_max_bytes / divisor, 1); + std::max<uint64_t>(pool.info.target_max_bytes / divisor, 1); full_micro = num_user_objects * avg_size * 1000000 / - MAX(pool.info.target_max_bytes / divisor, 1); + std::max<uint64_t>(pool.info.target_max_bytes / divisor, 1); } if (pool.info.target_max_objects > 0) { uint64_t dirty_objects_micro = num_dirty * 1000000 / - MAX(pool.info.target_max_objects / divisor, 1); + std::max<uint64_t>(pool.info.target_max_objects / divisor, 1); if (dirty_objects_micro > dirty_micro) dirty_micro = dirty_objects_micro; uint64_t full_objects_micro = num_user_objects * 1000000 / - MAX(pool.info.target_max_objects / divisor, 1); + std::max<uint64_t>(pool.info.target_max_objects / divisor, 1); if (full_objects_micro > full_micro) full_micro = full_objects_micro; } @@ -13942,8 +13947,8 @@ bool PrimaryLogPG::agent_choose_mode(bool restart, OpRequestRef op) flush_target += flush_slop; flush_high_target += flush_slop; } else { - flush_target -= MIN(flush_target, flush_slop); - flush_high_target -= MIN(flush_high_target, flush_slop); + flush_target -= std::min(flush_target, flush_slop); + flush_high_target -= std::min(flush_high_target, flush_slop); } if (dirty_micro > flush_high_target) { @@ -13958,7 +13963,7 @@ bool PrimaryLogPG::agent_choose_mode(bool restart, OpRequestRef op) if (restart || agent_state->evict_mode == TierAgentState::EVICT_MODE_IDLE) evict_target += evict_slop; else - evict_target -= MIN(evict_target, evict_slop); + evict_target -= std::min(evict_target, evict_slop); if (full_micro > 1000000) { // evict anything clean @@ -13969,8 +13974,9 @@ bool PrimaryLogPG::agent_choose_mode(bool restart, OpRequestRef op) evict_mode = TierAgentState::EVICT_MODE_SOME; uint64_t over = full_micro - evict_target; uint64_t span = 1000000 - evict_target; - evict_effort = MAX(over * 1000000 / span, - (unsigned)(1000000.0 * cct->_conf->osd_agent_min_evict_effort)); + evict_effort = std::max(over * 1000000 / span, + uint64_t(1000000.0 * + cct->_conf->osd_agent_min_evict_effort)); // quantize effort to avoid too much reordering in the agent_queue.
uint64_t inc = cct->_conf->osd_agent_quantize_effort * 1000000; diff --git a/src/osd/ReplicatedBackend.cc b/src/osd/ReplicatedBackend.cc index 4a7008f578cb2..be83c8d82e671 100644 --- a/src/osd/ReplicatedBackend.cc +++ b/src/osd/ReplicatedBackend.cc @@ -965,7 +965,7 @@ Message * ReplicatedBackend::generate_subop( if (!parent->should_send_op(peer, soid)) { dout(10) << "issue_repop shipping empty opt to osd." << peer <<", object " << soid - << " beyond MAX(last_backfill_started " + << " beyond std::max(last_backfill_started " << ", pinfo.last_backfill " << pinfo.last_backfill << ")" << dendl; ObjectStore::Transaction t; diff --git a/src/osd/osd_types.cc b/src/osd/osd_types.cc index 98f12fe404169..c7246c498d5f6 100644 --- a/src/osd/osd_types.cc +++ b/src/osd/osd_types.cc @@ -190,7 +190,7 @@ void object_locator_t::encode(bufferlist& bl) const encode(nspace, bl); encode(hash, bl); if (hash != -1) - encode_compat = MAX(encode_compat, 6); // need to interpret the hash + encode_compat = std::max<uint8_t>(encode_compat, 6); // need to interpret the hash ENCODE_FINISH_NEW_COMPAT(bl, encode_compat); } diff --git a/src/osdc/Striper.cc b/src/osdc/Striper.cc index e2af5c16b4100..ffdc45d9c3099 100644 --- a/src/osdc/Striper.cc +++ b/src/osdc/Striper.cc @@ -185,7 +185,7 @@ void Striper::extent_to_file(CephContext *cct, file_layout_t *layout, uint64_t stripeno = off / su + objectsetno * stripes_per_object; uint64_t blockno = stripeno * stripe_count + stripepos; uint64_t extent_off = blockno * su + off_in_block; - uint64_t extent_len = MIN(len, su - off_in_block); + uint64_t extent_len = std::min(len, su - off_in_block); extents.push_back(make_pair(extent_off, extent_len)); ldout(cct, 20) << " object " << off << "~" << extent_len @@ -267,7 +267,7 @@ void Striper::StripedReadResult::add_partial_result( p != buffer_extents.end(); ++p) { pair<bufferlist,uint64_t>& r = partial[p->first]; - size_t actual = MIN(bl.length(), p->second); + size_t actual = std::min<uint64_t>(bl.length(), p->second); bl.splice(0, actual, &r.first); r.second = p->second; total_intended_len += r.second; @@ -314,7 +314,7 @@ void Striper::StripedReadResult::add_partial_sparse_result( if (s->first > bl_off) { // gap in sparse read result pair<bufferlist,uint64_t>& r = partial[tofs]; - size_t gap = MIN(s->first - bl_off, tlen); + size_t gap = std::min(s->first - bl_off, tlen); ldout(cct, 20) << " s gap " << gap << ", skipping" << dendl; r.second = gap; total_intended_len += r.second; @@ -328,7 +328,7 @@ void Striper::StripedReadResult::add_partial_sparse_result( assert(s->first <= bl_off); size_t left = (s->first + s->second) - bl_off; - size_t actual = MIN(left, tlen); + size_t actual = std::min(left, tlen); if (actual > 0) { ldout(cct, 20) << " s has " << actual << ", copying" << dendl; diff --git a/src/test/compressor/compressor_example.h b/src/test/compressor/compressor_example.h index 334a4eecd0fae..d2228c1bffb5c 100644 --- a/src/test/compressor/compressor_example.h +++ b/src/test/compressor/compressor_example.h @@ -45,7 +45,7 @@ public: } int decompress(bufferlist::iterator &p, size_t compressed_len, bufferlist &out) override { - p.copy(MIN(p.get_remaining(), compressed_len), out); + p.copy(std::min<size_t>(p.get_remaining(), compressed_len), out); return 0; } }; diff --git a/src/test/compressor/test_compression.cc b/src/test/compressor/test_compression.cc index 529033b9a6860..a799a93ddd0e7 100644 --- a/src/test/compressor/test_compression.cc +++ b/src/test/compressor/test_compression.cc @@ -223,7 +223,7 @@ TEST_P(CompressorTest, sharded_input_decompress) size_t left = out.length()-small_prefix_size;
size_t offs = small_prefix_size; while( left > 0 ){ - size_t shard_size = MIN( 2048, left ); + size_t shard_size = std::min<size_t>(2048, left); tmp.substr_of(out, offs, shard_size ); out2.append( tmp ); left -= shard_size; diff --git a/src/test/librados/misc.cc b/src/test/librados/misc.cc index a931c7d718ac4..e3101e23c3713 100644 --- a/src/test/librados/misc.cc +++ b/src/test/librados/misc.cc @@ -754,7 +754,8 @@ TEST_F(LibRadosMiscPP, BigAttrPP) { for (int i=0; i<1000; i++) { bl.clear(); got.clear(); - bl.append(buffer::create(MIN(g_conf->osd_max_attr_size, 1024))); + bl.append(buffer::create(std::min<uint64_t>(g_conf->osd_max_attr_size, + 1024))); char n[10]; snprintf(n, sizeof(n), "a%d", i); ASSERT_EQ(0, ioctx.setxattr("foo", n, bl)); diff --git a/src/test/objectstore/store_test.cc b/src/test/objectstore/store_test.cc index 5eb81afe020e0..a50972a4b8150 100644 --- a/src/test/objectstore/store_test.cc +++ b/src/test/objectstore/store_test.cc @@ -3767,9 +3767,9 @@ public: uint64_t dstoff = srcoff; //u1(*rng); uint64_t len = u2(*rng); if (write_alignment) { - srcoff = ROUND_UP_TO(srcoff, write_alignment); - dstoff = ROUND_UP_TO(dstoff, write_alignment); - len = ROUND_UP_TO(len, write_alignment); + srcoff = round_up_to(srcoff, write_alignment); + dstoff = round_up_to(dstoff, write_alignment); + len = round_up_to(len, write_alignment); } if (srcoff > srcdata.length() - 1) { @@ -3837,8 +3837,8 @@ public: uint64_t len = u2(*rng); bufferlist bl; if (write_alignment) { - offset = ROUND_UP_TO(offset, write_alignment); - len = ROUND_UP_TO(len, write_alignment); + offset = round_up_to(offset, write_alignment); + len = round_up_to(len, write_alignment); } filled_byte_array(bl, len); @@ -3882,7 +3882,7 @@ public: boost::uniform_int<> choose(0, max_object_len); size_t len = choose(*rng); if (write_alignment) { - len = ROUND_UP_TO(len, write_alignment); + len = round_up_to(len, write_alignment); } t.truncate(cid, obj, len); @@ -3918,8 +3918,8 @@ public: uint64_t offset = u1(*rng); uint64_t len = u2(*rng); if (write_alignment) { - offset = ROUND_UP_TO(offset, write_alignment); - len = ROUND_UP_TO(len, write_alignment); + offset = round_up_to(offset, write_alignment); + len = round_up_to(len, write_alignment); } if (len > 0) { diff --git a/src/test/osdc/object_cacher_stress.cc b/src/test/osdc/object_cacher_stress.cc index 103347d3ac8a0..bca96512fedbe 100644 --- a/src/test/osdc/object_cacher_stress.cc +++ b/src/test/osdc/object_cacher_stress.cc @@ -89,9 +89,9 @@ int stress_test(uint64_t num_ops, uint64_t num_objs, for (uint64_t i = 0; i < num_ops; ++i) { uint64_t offset = random() % max_obj_size; - uint64_t max_len = MIN(max_obj_size - offset, max_op_len); + uint64_t max_len = std::min(max_obj_size - offset, max_op_len); // no zero-length operations - uint64_t length = random() % (MAX(max_len - 1, 1)) + 1; + uint64_t length = random() % (std::max<uint64_t>(max_len - 1, 1)) + 1; std::string oid = "test" + stringify(random() % num_objs); bool is_read = random() < percent_reads * RAND_MAX; ceph::shared_ptr<op_data> op(new op_data(oid, offset, length, is_read)); -- 2.39.5
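
Note on the two replacement families used throughout this patch: unlike the old MIN()/MAX() macros, std::min and std::max deduce a single template parameter, so call sites that mix integer types (for example a uint64_t counter against an int literal) need an explicit template argument; the intarith helpers round_up_to() and shift_round_up() keep the rounding behaviour of ROUND_UP_TO/SHIFT_ROUND_UP. A minimal standalone sketch of both points follows; the helper definitions in it are illustrative stand-ins for what include/intarith.h provides, not the actual Ceph header.

// Illustrative sketch only: round_up_to() and shift_round_up() are local
// stand-ins mirroring the behaviour this patch relies on from
// include/intarith.h; they are not the Ceph implementations.
#include <algorithm>
#include <cstdint>
#include <iostream>

// Round n up to the next multiple of d (d > 0).
template <typename T, typename U>
constexpr T round_up_to(T n, U d) {
  return n % d ? n - n % d + d : n;
}

// Divide x by 2^b, rounding up.
template <typename T, typename U>
constexpr T shift_round_up(T x, U b) {
  return (x + (T(1) << b) - 1) >> b;
}

int main() {
  uint64_t bytes = 3000, obj = 7;

  // MAX(bytes / obj, 1) compared mismatched types freely; std::max deduces
  // one type, so the uint64_t/int mix needs an explicit template argument.
  uint64_t avg_size = std::max<uint64_t>(bytes / obj, 1);

  std::cout << "avg_size " << avg_size                                  // 428
            << " round_up_to(10, 4) " << round_up_to(10, 4)             // 12
            << " shift_round_up(1025, 10) " << shift_round_up(1025, 10) // 2
            << std::endl;
  return 0;
}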