ldout(cct, 10) << __func__ << " added " << added << dendl;
data.rotating_ver++;
//data.next_rotating_time = ceph_clock_now(cct);
- //data.next_rotating_time += MIN(cct->_conf->auth_mon_ticket_ttl, cct->_conf->auth_service_ticket_ttl);
+ //data.next_rotating_time += std::min(cct->_conf->auth_mon_ticket_ttl, cct->_conf->auth_service_ticket_ttl);
_dump_rotating_secrets();
return true;
}
uint64_t end_offset = byte_offset + byte_length;
while (byte_offset < end_offset) {
- uint64_t len = MIN(BLOCK_SIZE, end_offset - byte_offset);
+ uint64_t len = std::min<uint64_t>(BLOCK_SIZE, end_offset - byte_offset);
bufferlist bit;
bit.substr_of(m_data, byte_offset, len);
}
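Unlike the old MIN()/MAX() macros, std::min and std::max deduce a single template parameter, so call sites with mixed operand types must name it explicitly (std::min<uint64_t>(...), std::max<size_t>(...), and so on), which is why many converted lines in this patch carry an explicit type. A minimal illustration of the rule, not part of the patch:

    #include <algorithm>
    #include <cstdint>

    uint64_t clamp_len(uint32_t block, uint64_t remaining) {
      // return std::min(block, remaining);         // ill-formed: T deduced as both uint32_t and uint64_t
      return std::min<uint64_t>(block, remaining);  // explicit T accepts both operands, matching the macro's behavior
    }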
while (byte_offset < end_offset) {
- uint64_t len = MIN(BLOCK_SIZE, end_offset - byte_offset);
+ uint64_t len = std::min<uint64_t>(BLOCK_SIZE, end_offset - byte_offset);
bufferptr ptr;
it.copy_deep(len, ptr);
using namespace ceph;
-#define CEPH_BUFFER_ALLOC_UNIT (MIN(CEPH_PAGE_SIZE, 4096))
+#define CEPH_BUFFER_ALLOC_UNIT (std::min(CEPH_PAGE_SIZE, 4096u))
#define CEPH_BUFFER_APPEND_SIZE (CEPH_BUFFER_ALLOC_UNIT - sizeof(raw_combined))
#ifdef BUFFER_DEBUG
int mempool = mempool::mempool_buffer_anon) {
if (!align)
align = sizeof(size_t);
- size_t rawlen = ROUND_UP_TO(sizeof(buffer::raw_combined),
+ size_t rawlen = round_up_to(sizeof(buffer::raw_combined),
alignof(buffer::raw_combined));
- size_t datalen = ROUND_UP_TO(len, alignof(buffer::raw_combined));
+ size_t datalen = round_up_to(len, alignof(buffer::raw_combined));
#ifdef DARWIN
char *ptr = (char *) valloc(rawlen + datalen);
}
}
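round_up_to() replaces the ROUND_UP_TO macro. Assuming it keeps the macro's semantics (round n up to the next multiple of d, leaving already-aligned values untouched), this sketch shows what the converted call sites above rely on; the real helper lives elsewhere in the tree (presumably include/intarith.h):

    // Illustrative only, not the tree's actual definition.
    template <typename T, typename U>
    constexpr T round_up_to(T n, U d) {
      return n % d ? n + d - n % d : n;
    }

    static_assert(round_up_to(10u, 4u) == 12u, "rounds up to the next multiple");
    static_assert(round_up_to(8u, 4u) == 8u, "aligned values are unchanged");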
*data = p->c_str() + p_off;
- size_t l = MIN(p->length() - p_off, want);
+ size_t l = std::min<size_t>(p->length() - p_off, want);
p_off += l;
if (p_off == p->length()) {
++p;
uint32_t buffer::list::iterator_impl<is_const>::crc32c(
size_t length, uint32_t crc)
{
- length = MIN( length, get_remaining());
+ length = std::min<size_t>(length, get_remaining());
while (length > 0) {
const char *p;
size_t l = get_ptr_and_advance(length, &p);
if (max_buffers && _buffers.size() > max_buffers
&& _len > (max_buffers * align_size)) {
- align_size = ROUND_UP_TO(ROUND_UP_TO(_len, max_buffers) / max_buffers, align_size);
+ align_size = round_up_to(round_up_to(_len, max_buffers) / max_buffers, align_size);
}
std::list<ptr>::iterator p = _buffers.begin();
while (p != _buffers.end()) {
// make a new append_buffer. fill out a complete page, factoring in the
// raw_combined overhead.
- size_t need = ROUND_UP_TO(len, sizeof(size_t)) + sizeof(raw_combined);
- size_t alen = ROUND_UP_TO(need, CEPH_BUFFER_ALLOC_UNIT) -
+ size_t need = round_up_to(len, sizeof(size_t)) + sizeof(raw_combined);
+ size_t alen = round_up_to(need, CEPH_BUFFER_ALLOC_UNIT) -
sizeof(raw_combined);
append_buffer = raw_combined::create(alen, 0, get_mempool());
append_buffer.set_length(0); // unused, so far.
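For example, with CEPH_BUFFER_ALLOC_UNIT = 4096, sizeof(size_t) = 8 on a 64-bit build, and a hypothetical sizeof(raw_combined) of 48 bytes, appending len = 5000 gives need = round_up_to(5000, 8) + 48 = 5048 and alen = round_up_to(5048, 4096) - 48 = 8192 - 48 = 8144, so the raw_combined header plus the usable buffer exactly fill two allocation units.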
while (left_pbrs) {
ssize_t bytes = 0;
unsigned iovlen = 0;
- uint64_t size = MIN(left_pbrs, IOV_MAX);
+ uint64_t size = std::min<uint64_t>(left_pbrs, IOV_MAX);
left_pbrs -= size;
while (size > 0) {
iov[iovlen].iov_base = (void *)p->c_str();
return -1;
}
- size_t remaining = MIN(p.get_remaining(), compressed_size);
+ size_t remaining = std::min<size_t>(p.get_remaining(), compressed_size);
while(remaining) {
long unsigned int len = p.get_ptr_and_advance(remaining, &c_in);
// now do the items.
if (!used_items.empty())
- size = MAX(size, *used_items.rbegin());
+ size = std::max(size, *used_items.rbegin());
vector<int> items(size);
vector<int> weights(size);
uint64_t max_payload_size = m_metadata->get_object_size() -
Entry::get_fixed_size();
if (m_metadata->get_settings().max_payload_bytes > 0) {
- max_payload_size = MIN(max_payload_size,
- m_metadata->get_settings().max_payload_bytes);
+ max_payload_size = std::min(max_payload_size,
+ m_metadata->get_settings().max_payload_bytes);
}
return max_payload_size;
}
++p) {
pool_stat_t& pv = result[p->first];
object_stat_sum_t *sum = &p->second.stats.sum;
- pv.num_kb = SHIFT_ROUND_UP(sum->num_bytes, 10);
+ pv.num_kb = shift_round_up(sum->num_bytes, 10);
pv.num_bytes = sum->num_bytes;
pv.num_objects = sum->num_objects;
pv.num_object_clones = sum->num_object_clones;
}
::pool_stat_t& r = rawresult[pool_name];
- stats->num_kb = SHIFT_ROUND_UP(r.stats.sum.num_bytes, 10);
+ stats->num_kb = shift_round_up(r.stats.sum.num_bytes, 10);
stats->num_bytes = r.stats.sum.num_bytes;
stats->num_objects = r.stats.sum.num_objects;
stats->num_object_clones = r.stats.sum.num_object_clones;
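shift_round_up() is the function form of SHIFT_ROUND_UP: divide by 2^shift, rounding any remainder up, which is how num_bytes becomes num_kb (shift 10) above. A sketch of the assumed behavior, not the tree's definition:

    #include <cstdint>

    constexpr uint64_t shift_round_up(uint64_t x, unsigned shift) {
      return (x + (uint64_t(1) << shift) - 1) >> shift;
    }

    static_assert(shift_round_up(1024, 10) == 1, "exactly 1 KiB");
    static_assert(shift_round_up(1025, 10) == 2, "a partial KiB still counts as one");
    static_assert(shift_round_up(0, 10) == 0, "zero stays zero");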
max_inc *= pi->layout.object_size;
new_max = std::min(new_max, size + max_inc);
}
- return ROUND_UP_TO(new_max, pi->get_layout_size_increment());
+ return round_up_to(new_max, pi->get_layout_size_increment());
}
void Locker::calc_new_client_ranges(CInode *in, uint64_t size,
}
// process queue
- unsigned max = MAX(1, g_conf->mon_osd_max_creating_pgs);
+ unsigned max = std::max<int64_t>(1, g_conf->mon_osd_max_creating_pgs);
const auto total = pending_creatings.pgs.size();
while (pending_creatings.pgs.size() < max &&
!pending_creatings.queue.empty()) {
<< " modified " << p->second.modified
<< " [" << p->second.start << "-" << p->second.end << ")"
<< dendl;
- int n = MIN(max - pending_creatings.pgs.size(),
+ int n = std::min<uint64_t>(max - pending_creatings.pgs.size(),
p->second.end - p->second.start);
ps_t first = p->second.start;
ps_t end = first + n;
// just check a few pgs for efficiency - this can't give a guarantee anyway,
// since filestore osds could always join the pool later
set<int> checked_osds;
- for (unsigned ps = 0; ps < MIN(8, pool.get_pg_num()); ++ps) {
+ for (unsigned ps = 0; ps < std::min(8u, pool.get_pg_num()); ++ps) {
vector<int> up, acting;
pg_t pgid(ps, pool_id, -1);
osdmap.pg_to_up_acting_osds(pgid, up, acting);
epoch_t first = get_first_committed();
epoch_t last = osdmap.get_epoch();
int max = g_conf->osd_map_message_max;
- for (epoch_t e = MAX(first, m->get_full_first());
- e <= MIN(last, m->get_full_last()) && max > 0;
+ for (epoch_t e = std::max(first, m->get_full_first());
+ e <= std::min(last, m->get_full_last()) && max > 0;
++e, --max) {
int r = get_version_full(e, reply->maps[e]);
assert(r >= 0);
}
- for (epoch_t e = MAX(first, m->get_inc_first());
- e <= MIN(last, m->get_inc_last()) && max > 0;
+ for (epoch_t e = std::max(first, m->get_inc_first());
+ e <= std::min(last, m->get_inc_last()) && max > 0;
++e, --max) {
int r = get_version(e, reply->incremental_maps[e]);
assert(r >= 0);
}
while (first <= osdmap.get_epoch()) {
- epoch_t last = MIN(first + g_conf->osd_map_message_max - 1,
- osdmap.get_epoch());
+ epoch_t last = std::min<epoch_t>(first + g_conf->osd_map_message_max - 1,
+ osdmap.get_epoch());
MOSDMap *m = build_incremental(first, last);
if (req) {
err = get_erasure_code(erasure_code_profile, &erasure_code, ss);
if (err == 0) {
*size = erasure_code->get_chunk_count();
- *min_size = MIN(erasure_code->get_data_chunk_count() + 1, *size);
+ *min_size = std::min(erasure_code->get_data_chunk_count() + 1, *size);
}
}
break;
ss << "splits in cache pools must be followed by scrubs and leave sufficient free space to avoid overfilling. use --yes-i-really-mean-it to force.";
return -EPERM;
}
- int expected_osds = MIN(p.get_pg_num(), osdmap.get_num_osds());
+ int expected_osds = std::min<int64_t>(p.get_pg_num(), osdmap.get_num_osds());
int64_t new_pgs = n - p.get_pg_num();
if (new_pgs > g_conf->mon_osd_max_split_count * expected_osds) {
ss << "specified pg_num " << n << " is too large (creating "
{
std::stringstream ss;
ss << p->first;
- max_width = MAX(ss.str().size(), max_width);
+ max_width = std::max<size_t>(ss.str().size(), max_width);
}
for (multimap<int,int>::reverse_iterator p = state_by_count.rbegin();
}
if (f) {
- f->dump_int("kb_used", SHIFT_ROUND_UP(sum.num_bytes, 10));
+ f->dump_int("kb_used", shift_round_up(sum.num_bytes, 10));
f->dump_int("bytes_used", sum.num_bytes);
f->dump_format_unquoted("percent_used", "%.2f", (used*100));
f->dump_unsigned("max_avail", avail / raw_used_rate);
}
double unusable = (double)osd_info->second.kb *
(1.0 - fratio);
- double avail = MAX(0.0, (double)osd_info->second.kb_avail - unusable);
+ double avail = std::max(0.0, (double)osd_info->second.kb_avail - unusable);
avail *= 1024.0;
int64_t proj = (int64_t)(avail / (double)p->second);
if (min < 0 || proj < min) {
average_util = (double)num_pg_copies / weight_sum;
} else {
// by osd utilization
- int num_osd = MAX(1, pgm.osd_stat.size());
+ int num_osd = std::max<size_t>(1, pgm.osd_stat.size());
if ((uint64_t)pgm.osd_sum.kb * 1024 / num_osd
< g_conf->mon_reweight_min_bytes_per_osd) {
*ss << "Refusing to reweight: we only have " << pgm.osd_sum.kb
// to represent e.g. differing storage capacities
unsigned new_weight = (unsigned)((average_util / util) * (float)weight);
if (weight > max_change)
- new_weight = MAX(new_weight, weight - max_change);
+ new_weight = std::max(new_weight, weight - max_change);
new_weights->insert({p.first, new_weight});
if (f) {
f->open_object_section("osd");
if (!no_increasing && util <= underload_util) {
// assign a higher weight.. if we can.
unsigned new_weight = (unsigned)((average_util / util) * (float)weight);
- new_weight = MIN(new_weight, weight + max_change);
+ new_weight = std::min(new_weight, weight + max_change);
if (new_weight > 0x10000)
new_weight = 0x10000;
if (new_weight > weight) {
if (off & ~CEPH_PAGE_MASK) {
// head
alloc_len += CEPH_PAGE_SIZE;
- head = MIN(CEPH_PAGE_SIZE - (off & ~CEPH_PAGE_MASK), left);
+ head = std::min<uint64_t>(CEPH_PAGE_SIZE - (off & ~CEPH_PAGE_MASK), left);
left -= head;
}
alloc_len += left;
state(STATE_NONE), state_after_send(STATE_NONE), port(-1),
dispatch_queue(q), can_write(WriteStatus::NOWRITE),
keepalive(false), recv_buf(NULL),
- recv_max_prefetch(MAX(msgr->cct->_conf->ms_tcp_prefetch_max_size, TCP_PREFETCH_MIN_SIZE)),
+ recv_max_prefetch(std::max<int64_t>(msgr->cct->_conf->ms_tcp_prefetch_max_size, TCP_PREFETCH_MIN_SIZE)),
recv_start(0), recv_end(0),
last_active(ceph::coarse_mono_clock::now()),
inactive_timeout_us(cct->_conf->ms_tcp_read_timeout*1000*1000),
ssize_t r = 0;
uint64_t left = len - state_offset;
if (recv_end > recv_start) {
- uint64_t to_read = MIN(recv_end - recv_start, left);
+ uint64_t to_read = std::min<uint64_t>(recv_end - recv_start, left);
memcpy(p, recv_buf+recv_start, to_read);
recv_start += to_read;
left -= to_read;
{
while (msg_left > 0) {
bufferptr bp = data_blp.get_current_ptr();
- unsigned read = MIN(bp.length(), msg_left);
+ unsigned read = std::min(bp.length(), msg_left);
r = read_until(read, bp.c_str());
if (r < 0) {
ldout(async_msgr->cct, 1) << __func__ << " read data error " << dendl;
while (left_pbrs) {
struct msghdr msg;
struct iovec msgvec[IOV_MAX];
- uint64_t size = MIN(left_pbrs, IOV_MAX);
+ uint64_t size = std::min<uint64_t>(left_pbrs, IOV_MAX);
left_pbrs -= size;
memset(&msg, 0, sizeof(msg));
msg.msg_iovlen = size;
if (off & ~CEPH_PAGE_MASK) {
// head
unsigned head = 0;
- head = MIN(CEPH_PAGE_SIZE - (off & ~CEPH_PAGE_MASK), left);
+ head = std::min<uint64_t>(CEPH_PAGE_SIZE - (off & ~CEPH_PAGE_MASK), left);
data.push_back(buffer::create(head));
left -= head;
}
}
}
bufferptr bp = blp.get_current_ptr();
- int read = MIN(bp.length(), left);
+ int read = std::min(bp.length(), left);
ldout(msgr->cct,20) << "reader reading nonblocking into " << (void*)bp.c_str() << " len " << bp.length() << dendl;
ssize_t got = tcp_read_nonblocking(bp.c_str(), read);
ldout(msgr->cct,30) << "reader read " << got << " of " << read << dendl;
unsigned left = blist.length();
while (left > 0) {
- unsigned donow = MIN(left, pb->length()-b_off);
+ unsigned donow = std::min(left, pb->length()-b_off);
if (donow == 0) {
ldout(msgr->cct,0) << "donow = " << donow << " left " << left << " pb->length " << pb->length()
<< " b_off " << b_off << dendl;
size_t left = len;
ssize_t total_recv = 0;
if (recv_len > recv_ofs) {
- int to_read = MIN(recv_len - recv_ofs, left);
+ int to_read = std::min(recv_len - recv_ofs, left);
memcpy(buf, &recv_buf[recv_ofs], to_read);
recv_ofs += to_read;
left -= to_read;
}
recv_len = (size_t)got;
- got = MIN(left, (size_t)got);
+ got = std::min(left, (size_t)got);
memcpy(buf, recv_buf, got);
recv_ofs = got;
total_recv += got;
unaligned_blocks = total_blocks % zone_size_block;
m_extra_blocks = unaligned_blocks? zone_size_block - unaligned_blocks: 0;
- total_blocks = ROUND_UP_TO(total_blocks, zone_size_block);
+ total_blocks = round_up_to(total_blocks, zone_size_block);
m_alloc_mode = mode;
m_is_stats_on = stats_on;
int64_t block_size)
: cct(cct)
{
- if (!ISP2(block_size)) {
+ if (!isp2(block_size)) {
derr << __func__ << " block_size " << block_size
<< " not power of 2 aligned!"
<< dendl;
- assert(ISP2(block_size));
+ assert(isp2(block_size));
return;
}
int64_t zone_size_blks = cct->_conf->bluestore_bitmapallocator_blocks_per_zone;
- if (!ISP2(zone_size_blks)) {
+ if (!isp2(zone_size_blks)) {
derr << __func__ << " zone_size " << zone_size_blks
<< " not power of 2 aligned!"
<< dendl;
- assert(ISP2(zone_size_blks));
+ assert(isp2(zone_size_blks));
return;
}
int64_t span_size = cct->_conf->bluestore_bitmapallocator_span_size;
- if (!ISP2(span_size)) {
+ if (!isp2(span_size)) {
derr << __func__ << " span_size " << span_size
<< " not power of 2 aligned!"
<< dendl;
- assert(ISP2(span_size));
+ assert(isp2(span_size));
return;
}
m_block_size = block_size;
- m_total_size = P2ALIGN(device_size, block_size);
+ m_total_size = p2align(device_size, block_size);
m_bit_alloc = new BitAllocator(cct, device_size / block_size,
zone_size_blks, CONCURRENT, true);
if (!m_bit_alloc) {
<< dendl;
uint64_t size = m_bit_alloc->size() * m_block_size;
- uint64_t offset_adj = ROUND_UP_TO(offset, m_block_size);
+ uint64_t offset_adj = round_up_to(offset, m_block_size);
uint64_t length_adj = ((length - (offset_adj - offset)) /
m_block_size) * m_block_size;
// we use the same adjustment/alignment that init_add_free does
// above so that we can yank back some of the space.
- uint64_t offset_adj = ROUND_UP_TO(offset, m_block_size);
+ uint64_t offset_adj = round_up_to(offset, m_block_size);
uint64_t length_adj = ((length - (offset_adj - offset)) /
m_block_size) * m_block_size;
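The isp2()/p2align()/p2roundup()/p2phase()/p2nphase() helpers replace the Solaris-style P2* macros used by the allocators and BlueStore below; all of them presume a power-of-two alignment. A sketch of the assumed semantics (illustrative, not the tree's actual definitions):

    #include <cstdint>

    constexpr bool     isp2(uint64_t v)                      { return (v & (v - 1)) == 0; }              // power of two?
    constexpr uint64_t p2align(uint64_t x, uint64_t align)   { return x & ~(align - 1); }                // round down
    constexpr uint64_t p2roundup(uint64_t x, uint64_t align) { return (x + align - 1) & ~(align - 1); }  // round up
    constexpr uint64_t p2phase(uint64_t x, uint64_t align)   { return x & (align - 1); }                 // offset past the boundary
    constexpr uint64_t p2nphase(uint64_t x, uint64_t align)  { return -x & (align - 1); }                // bytes to the next boundary

    static_assert(p2align(0x1234, 0x1000) == 0x1000, "");
    static_assert(p2roundup(0x1234, 0x1000) == 0x2000, "");
    static_assert(p2phase(0x1234, 0x1000) == 0x234, "");
    static_assert(p2nphase(0x1234, 0x1000) == 0xdcc, "");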
KeyValueDB::Transaction txn)
{
bytes_per_block = granularity;
- assert(ISP2(bytes_per_block));
- size = P2ALIGN(new_size, bytes_per_block);
+ assert(isp2(bytes_per_block));
+ size = p2align(new_size, bytes_per_block);
blocks_per_key = cct->_conf->bluestore_freelist_blocks_per_key;
_init_misc();
extents);
assert(got != 0);
if (got < (int64_t)want) {
- alloc[id]->unreserve(want - MAX(0, got));
+ alloc[id]->unreserve(want - std::max<int64_t>(0, got));
}
if (got < 0) {
derr << __func__ << " failed to allocate space to return to bluestore"
decode(uuid, p);
decode(seq, p);
if (len + 6 > bl.length()) {
- more = ROUND_UP_TO(len + 6 - bl.length(), super.block_size);
+ more = round_up_to(len + 6 - bl.length(), super.block_size);
}
}
if (uuid != super.uuid) {
while (len > 0) {
uint64_t x_off = 0;
auto p = h->file->fnode.seek(off, &x_off);
- uint64_t l = MIN(p->length - x_off, len);
+ uint64_t l = std::min(p->length - x_off, len);
dout(20) << __func__ << " read buffered 0x"
<< std::hex << x_off << "~" << l << std::dec
<< " of " << *p << dendl;
buf->bl_off = off & super.block_mask();
uint64_t x_off = 0;
auto p = h->file->fnode.seek(buf->bl_off, &x_off);
- uint64_t want = ROUND_UP_TO(len + (off & ~super.block_mask()),
+ uint64_t want = round_up_to(len + (off & ~super.block_mask()),
super.block_size);
- want = MAX(want, buf->max_prefetch);
- uint64_t l = MIN(p->length - x_off, want);
- uint64_t eof_offset = ROUND_UP_TO(h->file->fnode.size, super.block_size);
+ want = std::max(want, buf->max_prefetch);
+ uint64_t l = std::min(p->length - x_off, want);
+ uint64_t eof_offset = round_up_to(h->file->fnode.size, super.block_size);
if (!h->ignore_eof &&
buf->bl_off + l > eof_offset) {
l = eof_offset - buf->bl_off;
dout(20) << __func__ << " left 0x" << std::hex << left
<< " len 0x" << len << std::dec << dendl;
- int r = MIN(len, left);
+ int r = std::min(len, left);
if (outbl) {
bufferlist t;
t.substr_of(buf->bl, off - buf->bl_off, r);
<< dendl;
if (offset & ~super.block_mask()) {
offset &= super.block_mask();
- length = ROUND_UP_TO(length, super.block_size);
+ length = round_up_to(length, super.block_size);
}
uint64_t x_off = 0;
auto p = f->fnode.seek(offset, &x_off);
while (length > 0 && p != f->fnode.extents.end()) {
- uint64_t x_len = MIN(p->length - x_off, length);
+ uint64_t x_len = std::min(p->length - x_off, length);
bdev[p->bdev]->invalidate_cache(p->offset + x_off, x_len);
dout(20) << __func__ << " 0x" << std::hex << x_off << "~" << x_len
<< std:: dec << " of " << *p << dendl;
size += p.num_intervals() * (1 + 1 + sizeof(uint64_t) * 2);
size += dir_map.size() + (1 + avg_dir_size);
size += file_map.size() * (1 + avg_dir_size + avg_file_size);
- return ROUND_UP_TO(size, super.block_size);
+ return round_up_to(size, super.block_size);
}
void BlueFS::compact_log()
_compact_log_dump_metadata(&t);
// conservative estimate for final encoded size
- new_log_jump_to = ROUND_UP_TO(t.op_bl.length() + super.block_size * 2,
+ new_log_jump_to = round_up_to(t.op_bl.length() + super.block_size * 2,
cct->_conf->bluefs_alloc_size);
t.op_jump(log_seq, new_log_jump_to);
uint64_t bloff = 0;
while (length > 0) {
- uint64_t x_len = MIN(p->length - x_off, length);
+ uint64_t x_len = std::min(p->length - x_off, length);
bufferlist t;
t.substr_of(bl, bloff, x_len);
unsigned tail = x_len & ~super.block_mask();
assert(id < alloc.size());
uint64_t min_alloc_size = cct->_conf->bluefs_alloc_size;
- uint64_t left = ROUND_UP_TO(len, min_alloc_size);
+ uint64_t left = round_up_to(len, min_alloc_size);
int r = -ENOSPC;
int64_t alloc_len = 0;
AllocExtentVector extents;
{
assert(start_offset <= start_touch_offset && end_offset>= end_touch_offset);
- uint64_t lookup_start_offset = P2ALIGN(start_offset, min_alloc_size);
- uint64_t lookup_end_offset = ROUND_UP_TO(end_offset, min_alloc_size);
+ uint64_t lookup_start_offset = p2align(start_offset, min_alloc_size);
+ uint64_t lookup_end_offset = round_up_to(end_offset, min_alloc_size);
dout(30) << __func__ << " (hex): [" << std::hex
<< lookup_start_offset << ", " << lookup_end_offset
if (bi.referenced_bytes == 0) {
uint64_t len_on_disk = b_it->first->get_blob().get_ondisk_length();
int64_t blob_expected_for_release =
- ROUND_UP_TO(len_on_disk, min_alloc_size) / min_alloc_size;
+ round_up_to(len_on_disk, min_alloc_size) / min_alloc_size;
dout(30) << __func__ << " " << *(b_it->first)
<< " expected4release=" << blob_expected_for_release
if (b->is_writing() || b->is_clean()) {
if (b->offset < offset) {
uint32_t skip = offset - b->offset;
- uint32_t l = MIN(length, b->length - skip);
+ uint32_t l = std::min(length, b->length - skip);
res[offset].substr_of(b->data, skip, l);
res_intervals.insert(offset, l);
offset += l;
uint32_t new_blen = blen;
// make sure target_blob_size isn't less than current blob len
- target_blob_size = MAX(blen, target_blob_size);
+ target_blob_size = std::max(blen, target_blob_size);
if (b_offset >= blen) {
// new data totally stands out of the existing blob
new_blen = end;
} else {
// new data overlaps with the existing blob
- new_blen = MAX(blen, end);
+ new_blen = std::max(blen, end);
uint32_t overlap = 0;
if (new_blen > blen) {
unsigned target = cct->_conf->bluestore_extent_map_shard_target_size;
unsigned slop = target *
cct->_conf->bluestore_extent_map_shard_target_size_slop;
- unsigned extent_avg = bytes / MAX(1, extents);
+ unsigned extent_avg = bytes / std::max(1u, extents);
dout(20) << __func__ << " extent_avg " << extent_avg << ", target " << target
<< ", slop " << slop << dendl;
assert((loffs_end % min_alloc_size) == 0);
for (auto w : writes) {
if (b == w.b) {
- auto loffs2 = P2ALIGN(w.logical_offset, min_alloc_size);
- auto loffs2_end = P2ROUNDUP(w.logical_offset + w.length0, min_alloc_size);
+ auto loffs2 = p2align(w.logical_offset, min_alloc_size);
+ auto loffs2_end = p2roundup(w.logical_offset + w.length0, min_alloc_size);
if ((loffs <= loffs2 && loffs_end > loffs2) ||
(loffs >= loffs2 && loffs < loffs2_end)) {
return true;
// allocate superblock reserved space. note that we do not mark
// bluefs space as allocated in the freelist; we instead rely on
// bluefs_extents.
- uint64_t reserved = ROUND_UP_TO(MAX(SUPER_RESERVED, min_alloc_size),
- min_alloc_size);
+ uint64_t reserved = round_up_to(
+ std::max<uint64_t>(SUPER_RESERVED, min_alloc_size),
+ min_alloc_size);
fm->allocate(0, reserved, t);
if (cct->_conf->bluestore_bluefs) {
assert(bluefs_extents.num_intervals() == 1);
interval_set<uint64_t>::iterator p = bluefs_extents.begin();
- reserved = ROUND_UP_TO(p.get_start() + p.get_len(), min_alloc_size);
+ reserved = round_up_to(p.get_start() + p.get_len(), min_alloc_size);
dout(20) << __func__ << " reserved 0x" << std::hex << reserved << std::dec
<< " for bluefs" << dendl;
bufferlist bl;
dout(1) << __func__ << " pre-fragmenting freespace, using "
<< cct->_conf->bluestore_debug_prefill << " with max free extent "
<< cct->_conf->bluestore_debug_prefragment_max << dendl;
- uint64_t start = P2ROUNDUP(reserved, min_alloc_size);
+ uint64_t start = p2roundup(reserved, min_alloc_size);
uint64_t max_b = cct->_conf->bluestore_debug_prefragment_max / min_alloc_size;
float r = cct->_conf->bluestore_debug_prefill;
r /= 1.0 - r;
uint64_t l = (rand() % max_b + 1) * min_alloc_size;
if (start + l > end) {
l = end - start;
- l = P2ALIGN(l, min_alloc_size);
+ l = p2align(l, min_alloc_size);
}
assert(start + l <= end);
uint64_t u = 1 + (uint64_t)(r * (double)l);
- u = P2ROUNDUP(u, min_alloc_size);
+ u = p2roundup(u, min_alloc_size);
if (start + l + u > end) {
u = end - (start + l);
// trim to align so we don't overflow again
- u = P2ALIGN(u, min_alloc_size);
+ u = p2align(u, min_alloc_size);
stop = true;
}
assert(start + l + u <= end);
uint64_t initial =
bdev->get_size() * (cct->_conf->bluestore_bluefs_min_ratio +
cct->_conf->bluestore_bluefs_gift_ratio);
- initial = MAX(initial, cct->_conf->bluestore_bluefs_min);
+ initial = std::max(initial, cct->_conf->bluestore_bluefs_min);
if (cct->_conf->bluefs_alloc_size % min_alloc_size) {
derr << __func__ << " bluefs_alloc_size 0x" << std::hex
<< cct->_conf->bluefs_alloc_size << " is not a multiple of "
goto free_bluefs;
}
// align to bluefs's alloc_size
- initial = P2ROUNDUP(initial, cct->_conf->bluefs_alloc_size);
+ initial = p2roundup(initial, cct->_conf->bluefs_alloc_size);
// put bluefs in the middle of the device in case it is an HDD
- uint64_t start = P2ALIGN((bdev->get_size() - initial) / 2,
+ uint64_t start = p2align((bdev->get_size() - initial) / 2,
cct->_conf->bluefs_alloc_size);
bluefs->add_block_extent(bluefs_shared_bdev, start, initial);
bluefs_extents.insert(start, initial);
if (gift) {
// round up to alloc size
- gift = P2ROUNDUP(gift, cct->_conf->bluefs_alloc_size);
+ gift = p2roundup(gift, cct->_conf->bluefs_alloc_size);
// hard cap to fit into 32 bits
- gift = MIN(gift, 1ull<<31);
+ gift = std::min<uint64_t>(gift, 1ull << 31);
dout(10) << __func__ << " gifting " << gift
<< " (" << pretty_si_t(gift) << ")" << dendl;
// reclaim from bluefs?
if (reclaim) {
// round up to alloc size
- reclaim = P2ROUNDUP(reclaim, cct->_conf->bluefs_alloc_size);
+ reclaim = p2roundup(reclaim, cct->_conf->bluefs_alloc_size);
// hard cap to fit into 32 bits
- reclaim = MIN(reclaim, 1ull<<31);
+ reclaim = std::min<uint64_t>(reclaim, 1ull << 31);
dout(10) << __func__ << " reclaiming " << reclaim
<< " (" << pretty_si_t(reclaim) << ")" << dendl;
}
// make sure min_alloc_size is power of 2 aligned.
- if (!ISP2(min_alloc_size)) {
+ if (!isp2(min_alloc_size)) {
derr << __func__ << " min_alloc_size 0x"
<< std::hex << min_alloc_size << std::dec
<< " is not power of 2 aligned!"
BlueStore::mempool_dynamic_bitset &bitset,
std::function<void(uint64_t,
BlueStore::mempool_dynamic_bitset &)> f) {
- auto end = ROUND_UP_TO(off + len, granularity);
+ auto end = round_up_to(off + len, granularity);
while (off < end) {
uint64_t pos = off / granularity;
f(pos, bitset);
used_blocks.resize(fm->get_alloc_units());
apply(
- 0, MAX(min_alloc_size, SUPER_RESERVED), fm->get_alloc_size(), used_blocks,
+ 0, std::max<uint64_t>(min_alloc_size, SUPER_RESERVED), fm->get_alloc_size(), used_blocks,
[&](uint64_t pos, mempool_dynamic_bitset &bs) {
assert(pos < bs.size());
bs.set(pos);
uint64_t chunk_size = blob_len / (sizeof(*pu)*8);
uint64_t start = l.blob_offset / chunk_size;
uint64_t end =
- ROUND_UP_TO(l.blob_offset + l.length, chunk_size) / chunk_size;
+ round_up_to(l.blob_offset + l.length, chunk_size) / chunk_size;
for (auto i = start; i < end; ++i) {
(*pu) |= (1u << i);
}
length == min_alloc_size - SUPER_RESERVED) {
// this is due to the change just after luminous to min_alloc_size
// granularity allocations, and our baked in assumption at the top
- // of _fsck that 0~ROUND_UP_TO(SUPER_RESERVED,min_alloc_size) is used
- // (vs luminous's ROUND_UP_TO(SUPER_RESERVED,block_size)). harmless,
+ // of _fsck that 0~round_up_to(SUPER_RESERVED,min_alloc_size) is used
+ // (vs luminous's round_up_to(SUPER_RESERVED,block_size)). harmless,
// since we will never allocate this region below min_alloc_size.
dout(10) << __func__ << " ignoring free extent between SUPER_RESERVED"
<< " and min_alloc_size, 0x" << std::hex << offset << "~"
uint64_t x_len = length;
if (ep != eend && ep->logical_offset <= offset) {
uint64_t x_off = offset - ep->logical_offset;
- x_len = MIN(x_len, ep->length - x_off);
+ x_len = std::min(x_len, ep->length - x_off);
dout(30) << __func__ << " lextent 0x" << std::hex << offset << "~"
<< x_len << std::dec << " blob " << ep->blob << dendl;
destset.insert(offset, x_len);
size_t back_pad = 0;
size_t pad_count = 0;
if (front_pad) {
- size_t front_copy = MIN(chunk_size - front_pad, length);
+ size_t front_copy = std::min<uint64_t>(chunk_size - front_pad, length);
bufferptr z = buffer::create_page_aligned(chunk_size);
z.zero(0, front_pad, false);
pad_count += front_pad;
prev_ep = end; // to avoid this extent check as it's a duplicate
}
- auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size);
+ auto max_bsize = std::max(wctx->target_blob_size, min_alloc_size);
auto min_off = offset >= max_bsize ? offset - max_bsize : 0;
uint32_t alloc_len = min_alloc_size;
- auto offset0 = P2ALIGN(offset, alloc_len);
+ auto offset0 = p2align(offset, alloc_len);
bool any_change;
uint64_t chunk_size = b->get_blob().get_chunk_size(block_size);
// can we pad our head/tail out with zeros?
uint64_t head_pad, tail_pad;
- head_pad = P2PHASE(offset, chunk_size);
- tail_pad = P2NPHASE(end_offs, chunk_size);
+ head_pad = p2phase(offset, chunk_size);
+ tail_pad = p2nphase(end_offs, chunk_size);
if (head_pad || tail_pad) {
o->extent_map.fault_range(db, offset - head_pad,
end_offs - offset + head_pad + tail_pad);
return;
}
// read some data to fill out the chunk?
- uint64_t head_read = P2PHASE(b_off, chunk_size);
- uint64_t tail_read = P2NPHASE(b_off + b_len, chunk_size);
+ uint64_t head_read = p2phase(b_off, chunk_size);
+ uint64_t tail_read = p2nphase(b_off + b_len, chunk_size);
if ((head_read || tail_read) &&
(b->get_blob().get_ondisk_length() >= b_off + b_len + tail_read) &&
head_read + tail_read < min_alloc_size) {
// new blob.
BlobRef b = c->new_blob();
- uint64_t b_off = P2PHASE(offset, alloc_len);
+ uint64_t b_off = p2phase(offset, alloc_len);
uint64_t b_off0 = b_off;
_pad_zeros(&bl, &b_off0, block_size);
o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
logger->inc(l_bluestore_write_big);
logger->inc(l_bluestore_write_big_bytes, length);
o->extent_map.punch_hole(c, offset, length, &wctx->old_extents);
- auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size);
+ auto max_bsize = std::max(wctx->target_blob_size, min_alloc_size);
while (length > 0) {
bool new_blob = false;
- uint32_t l = MIN(max_bsize, length);
+ uint32_t l = std::min(max_bsize, length);
BlobRef b;
uint32_t b_off = 0;
// compress (as needed) and calc needed space
uint64_t need = 0;
- auto max_bsize = MAX(wctx->target_blob_size, min_alloc_size);
+ auto max_bsize = std::max(wctx->target_blob_size, min_alloc_size);
for (auto& wi : wctx->writes) {
if (c && wi.blob_length > min_alloc_size) {
utime_t start = ceph_clock_now();
wi.compressed_bl.claim_append(t);
wi.compressed_len = wi.compressed_bl.length();
- uint64_t newlen = P2ROUNDUP(wi.compressed_len, min_alloc_size);
+ uint64_t newlen = p2roundup(wi.compressed_len, min_alloc_size);
uint64_t want_len_raw = wi.blob_length * crr;
- uint64_t want_len = P2ROUNDUP(want_len_raw, min_alloc_size);
+ uint64_t want_len = p2roundup(want_len_raw, min_alloc_size);
if (newlen <= want_len && newlen < wi.blob_length) {
// Cool. We compressed at least as much as we were hoping to.
// pad out to min_alloc_size
for (auto& p : extents) {
txc->allocated.insert(p.offset, p.length);
}
- dblob.allocated(P2ALIGN(b_off, min_alloc_size), final_length, extents);
+ dblob.allocated(p2align(b_off, min_alloc_size), final_length, extents);
dout(20) << __func__ << " blob " << *b << dendl;
if (dblob.has_csum()) {
uint64_t tail_offset, tail_length;
head_offset = offset;
- head_length = P2NPHASE(offset, min_alloc_size);
+ head_length = p2nphase(offset, min_alloc_size);
- tail_offset = P2ALIGN(end, min_alloc_size);
- tail_length = P2PHASE(end, min_alloc_size);
+ tail_offset = p2align(end, min_alloc_size);
+ tail_length = p2phase(end, min_alloc_size);
middle_offset = head_offset + head_length;
middle_length = length - head_length - tail_length;
uint32_t key_slab = get_key_slab(key_size);
uint32_t value_slab = get_value_slab(value_size);
key_hist[prefix][key_slab].count++;
- key_hist[prefix][key_slab].max_len = MAX(key_size, key_hist[prefix][key_slab].max_len);
+ key_hist[prefix][key_slab].max_len =
+ std::max<size_t>(key_size, key_hist[prefix][key_slab].max_len);
key_hist[prefix][key_slab].val_map[value_slab].count++;
key_hist[prefix][key_slab].val_map[value_slab].max_len =
- MAX(value_size, key_hist[prefix][key_slab].val_map[value_slab].max_len);
+ std::max<size_t>(value_size,
+ key_hist[prefix][key_slab].val_map[value_slab].max_len);
}
void BlueStore::DBHistogram::dump(Formatter *f)
key_size = iter->key_size();
value_size = iter->value_size();
hist.value_hist[hist.get_value_slab(value_size)]++;
- max_key_size = MAX(max_key_size, key_size);
- max_value_size = MAX(max_value_size, value_size);
+ max_key_size = std::max(max_key_size, key_size);
+ max_value_size = std::max(max_value_size, value_size);
total_key_size += key_size;
total_value_size += value_size;
if (skew)
skew = alloc_unit - skew;
*offset = p.get_start() + skew;
- *length = std::min(std::max(alloc_unit, want_size), P2ALIGN((p.get_len() - skew), alloc_unit));
+ *length = std::min(std::max(alloc_unit, want_size), p2align((p.get_len() - skew), alloc_unit));
if (cct->_conf->bluestore_debug_small_allocations) {
uint64_t max =
alloc_unit * (rand() % cct->_conf->bluestore_debug_small_allocations);
}
if (p->first > offset) {
// gap
- uint64_t newlen = MIN(p->first - offset, length);
+ uint64_t newlen = std::min<uint64_t>(p->first - offset, length);
p = ref_map.insert(
map<uint64_t,record_t>::value_type(offset,
record_t(newlen, 1))).first;
assert(_au_size > 0);
assert(full_length > 0);
clear();
- uint32_t _num_au = ROUND_UP_TO(full_length, _au_size) / _au_size;
+ uint32_t _num_au = round_up_to(full_length, _au_size) / _au_size;
au_size = _au_size;
if( _num_au > 1 ) {
num_au = _num_au;
while (offset < end) {
auto phase = offset % au_size;
bytes_per_au[offset / au_size] +=
- MIN(au_size - phase, end - offset);
+ std::min(au_size - phase, end - offset);
offset += (phase ? au_size - phase : au_size);
}
}
while (offset < end) {
auto phase = offset % au_size;
size_t pos = offset / au_size;
- auto diff = MIN(au_size - phase, end - offset);
+ auto diff = std::min(au_size - phase, end - offset);
assert(diff <= bytes_per_au[pos]);
bytes_per_au[pos] -= diff;
offset += (phase ? au_size - phase : au_size);
uint32_t to_release = loffs_it->length;
do {
uint32_t to_release_part =
- MIN(pext_it->length - delta0 - delta, to_release);
+ std::min(pext_it->length - delta0 - delta, to_release);
auto o = pext_it->offset + delta0 + delta;
if (last_r != r->end() && last_r->offset + last_r->length == o) {
last_r->length += to_release_part;
}
void prune_tail(uint32_t new_len) {
if (num_au) {
- new_len = ROUND_UP_TO(new_len, au_size);
+ new_len = round_up_to(new_len, au_size);
uint32_t _num_au = new_len / au_size;
assert(_num_au <= num_au);
if (_num_au) {
bytes_per_au[0] = old_total;
} else {
assert(_au_size == au_size);
- new_len = ROUND_UP_TO(new_len, au_size);
+ new_len = round_up_to(new_len, au_size);
uint32_t _num_au = new_len / au_size;
assert(_num_au >= num_au);
if (_num_au > num_au) {
/// return chunk (i.e. min readable block) size for the blob
uint64_t get_chunk_size(uint64_t dev_block_size) const {
return has_csum() ?
- MAX(dev_block_size, get_csum_chunk_size()) : dev_block_size;
+ std::max<uint64_t>(dev_block_size, get_csum_chunk_size()) : dev_block_size;
}
uint32_t get_csum_chunk_size() const {
return 1 << csum_chunk_order;
assert(offset + length <= blob_len);
uint64_t chunk_size = blob_len / (sizeof(unused)*8);
uint64_t start = offset / chunk_size;
- uint64_t end = ROUND_UP_TO(offset + length, chunk_size) / chunk_size;
+ uint64_t end = round_up_to(offset + length, chunk_size) / chunk_size;
auto i = start;
while (i < end && (unused & (1u << i))) {
i++;
assert((blob_len % (sizeof(unused)*8)) == 0);
assert(offset + length <= blob_len);
uint64_t chunk_size = blob_len / (sizeof(unused)*8);
- uint64_t start = ROUND_UP_TO(offset, chunk_size) / chunk_size;
+ uint64_t start = round_up_to(offset, chunk_size) / chunk_size;
uint64_t end = (offset + length) / chunk_size;
for (auto i = start; i < end; ++i) {
unused |= (1u << i);
assert(offset + length <= blob_len);
uint64_t chunk_size = blob_len / (sizeof(unused)*8);
uint64_t start = offset / chunk_size;
- uint64_t end = ROUND_UP_TO(offset + length, chunk_size) / chunk_size;
+ uint64_t end = round_up_to(offset + length, chunk_size) / chunk_size;
for (auto i = start; i < end; ++i) {
unused &= ~(1u << i);
}
}
while (x_len > 0) {
assert(p != extents.end());
- uint64_t l = MIN(p->length - x_off, x_len);
+ uint64_t l = std::min(p->length - x_off, x_len);
int r = f(p->offset + x_off, l);
if (r < 0)
return r;
uint64_t x_len = bl.length();
while (x_len > 0) {
assert(p != extents.end());
- uint64_t l = MIN(p->length - x_off, x_len);
+ uint64_t l = std::min(p->length - x_off, x_len);
bufferlist t;
it.copy(l, t);
f(p->offset + x_off, t);
memset(&h, 0, sizeof(h));
if (data_align >= 0)
h.pre_pad = ((unsigned int)data_align - (unsigned int)head_size) & ~CEPH_PAGE_MASK;
- off64_t size = ROUND_UP_TO(base_size + h.pre_pad, header.alignment);
+ off64_t size = round_up_to(base_size + h.pre_pad, header.alignment);
unsigned post_pad = size - base_size - h.pre_pad;
h.len = bl.length();
h.post_pad = post_pad;
{
dout(10) << __func__ << "trim(" << offset << ", " << end << dendl;
- offset = ROUND_UP_TO(offset, block_size);
+ offset = round_up_to(offset, block_size);
if (offset >= end)
return;
- end = ROUND_UP_TO(end - block_size, block_size);
+ end = round_up_to(end - block_size, block_size);
assert(end >= offset);
if (offset < end)
if (block_device_discard(fd, offset, end - offset) < 0)
} write_finish_thread;
off64_t get_top() const {
- return ROUND_UP_TO(sizeof(header), block_size);
+ return round_up_to(sizeof(header), block_size);
}
ZTracer::Endpoint trace_endpoint;
_do_read_stripe(o, offset - stripe_off, &stripe);
dout(30) << __func__ << " stripe " << offset - stripe_off << " got "
<< stripe.length() << dendl;
- unsigned swant = MIN(stripe_size - stripe_off, length);
+ unsigned swant = std::min(stripe_size - stripe_off, length);
if (stripe.length()) {
if (swant == stripe.length()) {
bl.claim_append(stripe);
} else {
unsigned l = 0;
if (stripe_off < stripe.length()) {
- l = MIN(stripe.length() - stripe_off, swant);
+ l = std::min<uint64_t>(stripe.length() - stripe_off, swant);
bufferlist t;
t.substr_of(stripe, stripe_off, l);
bl.claim_append(t);
<< ", got " << prev.length() << dendl;
bufferlist bl;
if (offset_rem) {
- unsigned p = MIN(prev.length(), offset_rem);
+ unsigned p = std::min<uint64_t>(prev.length(), offset_rem);
if (p) {
dout(20) << __func__ << " reuse leading " << p << " bytes" << dendl;
bl.substr_of(prev, 0, p);
dout(30) << __func__ << " stripe " << pos - stripe_off << " got "
<< stripe.length() << dendl;
bufferlist bl;
- bl.substr_of(stripe, 0, MIN(stripe.length(), stripe_off));
+ bl.substr_of(stripe, 0, std::min<uint64_t>(stripe.length(), stripe_off));
if (end >= pos - stripe_off + stripe_size ||
end >= o->onode.size) {
dout(20) << __func__ << " truncated stripe " << pos - stripe_off
dout(30) << __func__ << " stripe " << pos - stripe_off << " got "
<< stripe.length() << dendl;
bufferlist t;
- t.substr_of(stripe, 0, MIN(stripe_off, stripe.length()));
+ t.substr_of(stripe, 0, std::min<uint64_t>(stripe_off, stripe.length()));
_do_write_stripe(txc, o, pos - stripe_off, t);
dout(20) << __func__ << " truncated stripe " << pos - stripe_off
<< " to " << t.length() << dendl;
dout(10) << __func__ << dendl;
st->reset();
st->total = cct->_conf->memstore_device_bytes;
- st->available = MAX(int64_t(st->total) - int64_t(used_bytes), 0ll);
+ st->available = std::max<int64_t>(int64_t(st->total) - int64_t(used_bytes), 0);
dout(10) << __func__ << ": used_bytes: " << used_bytes
<< "/" << cct->_conf->memstore_device_bytes << dendl;
return 0;
if (!oc)
return -ENOENT;
RWLock::WLocker l1(std::min(&(*c), &(*oc))->lock);
- RWLock::WLocker l2(MAX(&(*c), &(*oc))->lock);
+ RWLock::WLocker l2(std::max(&(*c), &(*oc))->lock);
if (c->object_hash.count(oid))
return -EEXIST;
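In the collection-to-collection hunks above and below, std::min and std::max are applied to the collection pointers themselves, so the two RWLocks are always acquired in a fixed address order no matter which collection is source and which is destination, presumably to avoid an ABBA deadlock between two racing operations on the same pair. A sketch of the idiom with hypothetical names, assuming the two objects are distinct:

    #include <algorithm>
    #include <mutex>

    void with_both(std::mutex &a, std::mutex &b) {
      // Lock the lower-addressed mutex first so every caller agrees on the order.
      std::mutex *first  = std::min(&a, &b);
      std::mutex *second = std::max(&a, &b);
      std::lock_guard<std::mutex> l1(*first);
      std::lock_guard<std::mutex> l2(*second);
      // ... touch the structures guarded by both locks ...
    }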
if (!dc)
return -ENOENT;
RWLock::WLocker l1(std::min(&(*sc), &(*dc))->lock);
- RWLock::WLocker l2(MAX(&(*sc), &(*dc))->lock);
+ RWLock::WLocker l2(std::max(&(*sc), &(*dc))->lock);
map<ghobject_t,ObjectRef>::iterator p = sc->object_map.begin();
while (p != sc->object_map.end()) {
auto dst_iter = dst_pages.begin();
for (auto &src_page : tls_pages) {
- auto sbegin = MAX(srcoff, src_page->offset);
+ auto sbegin = std::max(srcoff, src_page->offset);
auto send = std::min(srcoff + count, src_page->offset + src_page_size);
// zero-fill holes before src_page
if (srcoff < sbegin) {
while (dst_iter != dst_pages.end()) {
auto &dst_page = *dst_iter;
- auto dbegin = MAX(srcoff + delta, dst_page->offset);
+ auto dbegin = std::max(srcoff + delta, dst_page->offset);
auto dend = std::min(sbegin + delta, dst_page->offset + dst_page_size);
std::fill(dst_page->data + dbegin - dst_page->offset,
dst_page->data + dend - dst_page->offset, 0);
// copy data from src page to dst pages
while (dst_iter != dst_pages.end()) {
auto &dst_page = *dst_iter;
- auto dbegin = MAX(sbegin + delta, dst_page->offset);
+ auto dbegin = std::max(sbegin + delta, dst_page->offset);
auto dend = std::min(send + delta, dst_page->offset + dst_page_size);
std::copy(src_page->data + (dbegin - delta) - src_page->offset,
if (count > 0) {
while (dst_iter != dst_pages.end()) {
auto &dst_page = *dst_iter;
- auto dbegin = MAX(dstoff, dst_page->offset);
+ auto dbegin = std::max(dstoff, dst_page->offset);
auto dend = std::min(dstoff + count, dst_page->offset + dst_page_size);
std::fill(dst_page->data + dbegin - dst_page->offset,
dst_page->data + dend - dst_page->offset, 0);
private:
friend struct ECRecoveryHandle;
uint64_t get_recovery_chunk_size() const {
- return ROUND_UP_TO(cct->_conf->osd_recovery_max_chunk,
+ return round_up_to(cct->_conf->osd_recovery_max_chunk,
sinfo.get_stripe_width());
}
if (attempts && dur > 0) {
uint64_t avg_size = 1;
if (obj)
- avg_size = MAX(bytes / obj, 1);
+ avg_size = std::max<uint64_t>(bytes / obj, 1);
unsigned po = (double)target_obj_sec * dur * 1000.0 / (double)attempts;
unsigned pb = (double)target_bytes_sec / (double)avg_size * dur * 1000.0
/ (double)attempts;
dout(20) << __func__ << " po " << po << " pb " << pb << " avg_size "
<< avg_size << dendl;
if (target_obj_sec && target_bytes_sec)
- new_prob = MIN(po, pb);
+ new_prob = std::min(po, pb);
else if (target_obj_sec)
new_prob = po;
else if (target_bytes_sec)
ratio = (double)actual / (double)prob;
new_prob = (double)new_prob / ratio;
}
- new_prob = MAX(new_prob, min_prob);
- new_prob = MIN(new_prob, 1000);
+ new_prob = std::max(new_prob, min_prob);
+ new_prob = std::min(new_prob, 1000u);
// adjust
prob = (prob + new_prob) / 2;
- prob = MAX(prob, min_prob);
- prob = MIN(prob, 1000);
+ prob = std::max(prob, min_prob);
+ prob = std::min(prob, 1000u);
dout(10) << __func__ << " actual " << actual
<< ", actual/prob ratio " << ratio
<< ", adjusted new_prob " << new_prob
(osdmap->get_cluster_addr(name.num()) == con->get_peer_addr() ||
osdmap->get_hb_back_addr(name.num()) == con->get_peer_addr())) {
// remember
- epoch_t has = MAX(get_peer_epoch(name.num()), epoch);
+ epoch_t has = std::max(get_peer_epoch(name.num()), epoch);
// share?
if (has < osdmap->get_epoch()) {
return r;
}
snprintf(weight, sizeof(weight), "%.4lf",
- MAX((double).00001,
- (double)(st.total) /
- (double)(1ull << 40 /* TB */)));
+ std::max(.00001,
+ double(st.total) /
+ double(1ull << 40 /* TB */)));
}
std::multimap<string,string> loc = cct->crush_location.get_location();
<< " front " << p->second.last_rx_front
<< " (cutoff " << cutoff << ")" << dendl;
// fail
- failure_queue[p->first] = MIN(p->second.last_rx_back, p->second.last_rx_front);
+ failure_queue[p->first] = std::min(p->second.last_rx_back, p->second.last_rx_front);
}
}
}
dout(10) << "tick_without_osd_lock" << dendl;
logger->set(l_osd_buf, buffer::get_total_alloc());
- logger->set(l_osd_history_alloc_bytes, SHIFT_ROUND_UP(buffer::get_history_alloc_bytes(), 20));
+ logger->set(l_osd_history_alloc_bytes, shift_round_up(buffer::get_history_alloc_bytes(), 20));
logger->set(l_osd_history_alloc_num, buffer::get_history_alloc_num());
logger->set(l_osd_cached_crc, buffer::get_cached_crc());
logger->set(l_osd_cached_crc_adjusted, buffer::get_cached_crc_adjusted());
if (latest_subscribed_epoch >= epoch && !force_request)
return;
- latest_subscribed_epoch = MAX(epoch, latest_subscribed_epoch);
+ latest_subscribed_epoch = std::max<uint64_t>(epoch, latest_subscribed_epoch);
if (monc->sub_want_increment("osdmap", epoch, CEPH_SUBSCRIBE_ONETIME) ||
force_request) {
uint64_t txn_size = 0;
// store new maps: queue for disk and put in the osdmap cache
- epoch_t start = MAX(superblock.newest_map + 1, first);
+ epoch_t start = std::max(superblock.newest_map + 1, first);
for (epoch_t e = start; e <= last; e++) {
if (txn_size >= t.get_num_bytes()) {
derr << __func__ << " transaction size overflowed" << dendl;
uint64_t available_pushes;
while (!awaiting_throttle.empty() &&
_recover_now(&available_pushes)) {
- uint64_t to_start = MIN(
+ uint64_t to_start = std::min(
available_pushes,
cct->_conf->osd_recovery_max_single_start);
_queue_for_recovery(awaiting_throttle.front(), to_start);
// computed using target and eventual used to get degraded total.
unsigned target = get_osdmap()->get_pg_size(info.pgid.pgid);
- unsigned nrep = MAX(actingset.size(), upset.size());
+ unsigned nrep = std::max(actingset.size(), upset.size());
// calc num_object_copies
- info.stats.stats.calc_copies(MAX(target, nrep));
+ info.stats.stats.calc_copies(std::max(target, nrep));
info.stats.stats.sum.num_objects_degraded = 0;
info.stats.stats.sum.num_objects_unfound = 0;
info.stats.stats.sum.num_objects_misplaced = 0;
osd_missing = peer_missing[p].num_missing();
}
- osd_objects = MAX(0, num_objects - osd_missing);
+ osd_objects = std::max<int64_t>(0, num_objects - osd_missing);
object_copies += osd_objects;
// Count non-missing objects not in up as misplaced
if (!in_up) {
}
} else {
// If this peer has more objects then it should, ignore them
- int64_t osd_backfilled = MIN(num_objects,
- peer_info[p].stats.stats.sum.num_objects);
+ int64_t osd_backfilled = std::min(num_objects,
+ peer_info[p].stats.stats.sum.num_objects);
backfill_target_objects.insert(make_pair(osd_backfilled, p));
backfilled += osd_backfilled;
}
++i, --num_misplaced) {
adjust_misplaced += i->first;
}
- misplaced = MAX(0, misplaced - adjust_misplaced);
+ misplaced = std::max<int64_t>(0, misplaced - adjust_misplaced);
// a degraded object has fewer replicas or EC shards than the
// pool specifies. num_object_copies will never be smaller than target * num_objects.
- int64_t degraded = MAX(0, info.stats.stats.sum.num_object_copies - object_copies);
+ int64_t degraded = std::max<int64_t>(0, info.stats.stats.sum.num_object_copies - object_copies);
info.stats.stats.sum.num_objects_degraded = degraded;
info.stats.stats.sum.num_objects_unfound = get_num_unfound();
{
// set a max on the number of blocking peers we report. if we go
// over, report a random subset. keep the result sorted.
- unsigned keep = MIN(blocked_by.size(), cct->_conf->osd_max_pg_blocked_by);
+ unsigned keep = std::min<size_t>(blocked_by.size(), cct->_conf->osd_max_pg_blocked_by);
unsigned skip = blocked_by.size() - keep;
info.stats.blocked_by.clear();
info.stats.blocked_by.resize(keep);
* left end of the range if we are a tier because they may legitimately
* not exist (see _scrub).
*/
- int min = MAX(3, cct->_conf->osd_scrub_chunk_min);
+ int min = std::max<int64_t>(3, cct->_conf->osd_scrub_chunk_min);
hobject_t start = scrubber.start;
hobject_t candidate_end;
vector<hobject_t> objects;
ret = get_pgbackend()->objects_list_partial(
start,
min,
- MAX(min, cct->_conf->osd_scrub_chunk_max),
+ std::max<int64_t>(min, cct->_conf->osd_scrub_chunk_max),
&objects,
&candidate_end);
assert(ret >= 0);
* Furthermore, the event represented by a log tail was necessarily trimmed,
* thus neither olog.tail nor log.tail can be divergent. It's
* possible that olog/log contain no actual events between olog.head and
- * MAX(log.tail, olog.tail), however, since they might have been split out.
+ * max(log.tail, olog.tail), however, since they might have been split out.
* Thus, if we cannot find an event e such that
* log.tail <= e.version <= log.head, the last_update must actually be
- * MAX(log.tail, olog.tail).
+ * max(log.tail, olog.tail).
*/
- eversion_t limit = MAX(olog.tail, log.tail);
+ eversion_t limit = std::max(olog.tail, log.tail);
eversion_t lu =
(first_non_divergent == log.log.rend() ||
first_non_divergent->version < limit) ?
// find start point in olog
list<pg_log_entry_t>::iterator to = olog.log.end();
list<pg_log_entry_t>::iterator from = olog.log.end();
- eversion_t lower_bound = MAX(olog.tail, orig_tail);
+ eversion_t lower_bound = std::max(olog.tail, orig_tail);
while (1) {
if (from == olog.log.begin())
break;
--from;
dout(20) << " ? " << *from << dendl;
if (from->version <= log.head) {
- lower_bound = MAX(lower_bound, from->version);
+ lower_bound = std::max(lower_bound, from->version);
++from;
break;
}
<< " != " << info.pgid << dendl;
result = 0; // hmm?
} else {
- unsigned list_size = MIN(cct->_conf->osd_max_pgls, p->op.pgls.count);
+ unsigned list_size = std::min<uint64_t>(cct->_conf->osd_max_pgls,
+ p->op.pgls.count);
- dout(10) << " pgnls pg=" << m->get_pg() << " count " << list_size << dendl;
+ dout(10) << " pgnls pg=" << m->get_pg() << " count " << list_size
+ << dendl;
// read into a buffer
vector<hobject_t> sentries;
pg_nls_response_t response;
<< " != " << info.pgid << dendl;
result = 0; // hmm?
} else {
- unsigned list_size = MIN(cct->_conf->osd_max_pgls, p->op.pgls.count);
+ unsigned list_size = std::min<uint64_t>(cct->_conf->osd_max_pgls,
+ p->op.pgls.count);
dout(10) << " pgls pg=" << m->get_pg() << " count " << list_size << dendl;
// read into a buffer
target = cct->_conf->osd_max_pg_log_entries;
}
- eversion_t limit = MIN(
+ eversion_t limit = std::min(
min_last_complete_ondisk,
pg_log.get_can_rollback_to());
size_t log_size = pg_log.get_log().log.size();
// XXX the op.extent.length is the requested length for async read
// On error this length is changed to 0 after the error comes back.
- ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(op.extent.length, 10);
+ ctx->delta_stats.num_rd_kb += shift_round_up(op.extent.length, 10);
ctx->delta_stats.num_rd++;
return result;
}
// verify trailing hole?
if (cct->_conf->osd_verify_sparse_read_holes) {
- uint64_t end = MIN(op.extent.offset + op.extent.length, oi.size);
+ uint64_t end = std::min<uint64_t>(op.extent.offset + op.extent.length,
+ oi.size);
if (last < end) {
bufferlist t;
uint64_t len = end - last;
<< soid << dendl;
}
- ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(op.extent.length, 10);
+ ctx->delta_stats.num_rd_kb += shift_round_up(op.extent.length, 10);
ctx->delta_stats.num_rd++;
return 0;
}
if (r < 0)
result = r;
else
- ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(bl.length(), 10);
+ ctx->delta_stats.num_rd_kb += shift_round_up(bl.length(), 10);
ctx->delta_stats.num_rd++;
dout(10) << " map_extents done on object " << soid << dendl;
}
if (r >= 0) {
op.xattr.value_len = osd_op.outdata.length();
result = 0;
- ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(osd_op.outdata.length(), 10);
+ ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10);
} else
result = r;
bufferlist bl;
encode(out, bl);
- ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(bl.length(), 10);
+ ctx->delta_stats.num_rd_kb += shift_round_up(bl.length(), 10);
ctx->delta_stats.num_rd++;
osd_op.outdata.claim_append(bl);
}
break;
ctx->delta_stats.num_rd++;
- ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(xattr.length(), 10);
+ ctx->delta_stats.num_rd_kb += shift_round_up(xattr.length(), 10);
switch (op.xattr.cmp_mode) {
case CEPH_OSD_CMPXATTR_MODE_STRING:
result = -EFBIG;
break;
}
- unsigned max_name_len = MIN(osd->store->get_max_attr_name_length(),
- cct->_conf->osd_max_attr_name_len);
+ unsigned max_name_len =
+ std::min<uint64_t>(osd->store->get_max_attr_name_length(),
+ cct->_conf->osd_max_attr_name_len);
if (op.xattr.name_len > max_name_len) {
result = -ENAMETOOLONG;
break;
encode(num, osd_op.outdata);
osd_op.outdata.claim_append(bl);
encode(truncated, osd_op.outdata);
- ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(osd_op.outdata.length(), 10);
+ ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10);
ctx->delta_stats.num_rd++;
}
break;
encode(num, osd_op.outdata);
osd_op.outdata.claim_append(bl);
encode(truncated, osd_op.outdata);
- ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(osd_op.outdata.length(), 10);
+ ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10);
ctx->delta_stats.num_rd++;
}
break;
++ctx->num_read;
{
osd->store->omap_get_header(ch, ghobject_t(soid), &osd_op.outdata);
- ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(osd_op.outdata.length(), 10);
+ ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10);
ctx->delta_stats.num_rd++;
}
break;
osd->store->omap_get_values(ch, ghobject_t(soid), keys_to_get, &out);
} // else return empty omap entries
encode(out, osd_op.outdata);
- ctx->delta_stats.num_rd_kb += SHIFT_ROUND_UP(osd_op.outdata.length(), 10);
+ ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10);
ctx->delta_stats.num_rd++;
}
break;
}
t->omap_setkeys(soid, to_set_bl);
ctx->delta_stats.num_wr++;
- ctx->delta_stats.num_wr_kb += SHIFT_ROUND_UP(to_set_bl.length(), 10);
+ ctx->delta_stats.num_wr_kb += shift_round_up(to_set_bl.length(), 10);
}
obs.oi.set_flag(object_info_t::FLAG_OMAP);
obs.oi.clear_omap_digest();
}
}
delta_stats.num_wr++;
- delta_stats.num_wr_kb += SHIFT_ROUND_UP(length, 10);
+ delta_stats.num_wr_kb += shift_round_up(length, 10);
}
void PrimaryLogPG::truncate_update_size_and_usage(
// finish and log the op.
if (ctx->user_modify) {
// update the user_version for any modify ops, except for the watch op
- ctx->user_at_version = MAX(info.last_user_version, ctx->new_obs.oi.user_version) + 1;
+ ctx->user_at_version = std::max(info.last_user_version, ctx->new_obs.oi.user_version) + 1;
/* In order for new clients and old clients to interoperate properly
* when exchanging versions, we need to lower bound the user_version
* (which our new clients pay proper attention to)
bufferlist& bl = reply_obj.data;
if (left > 0 && !cursor.data_complete) {
if (cursor.data_offset < oi.size) {
- uint64_t max_read = MIN(oi.size - cursor.data_offset, (uint64_t)left);
+ uint64_t max_read = std::min(oi.size - cursor.data_offset, (uint64_t)left);
if (cb) {
async_read_started = true;
ctx->pending_async_reads.push_back(
ctx->delta_stats.num_bytes += obs.oi.size;
}
ctx->delta_stats.num_wr++;
- ctx->delta_stats.num_wr_kb += SHIFT_ROUND_UP(obs.oi.size, 10);
+ ctx->delta_stats.num_wr_kb += shift_round_up(obs.oi.size, 10);
osd->logger->inc(l_osd_copyfrom);
}
flush_ops[soid] = fop;
info.stats.stats.sum.num_flush++;
- info.stats.stats.sum.num_flush_kb += SHIFT_ROUND_UP(oi.size, 10);
+ info.stats.stats.sum.num_flush_kb += shift_round_up(oi.size, 10);
return -EINPROGRESS;
}
* All objects in PG in [MIN,backfill_info.begin) have been backfilled to all
* backfill_targets. There may be objects on backfill_target(s) yet to be deleted.
*
- * For a backfill target, all objects < MIN(peer_backfill_info[target].begin,
+ * For a backfill target, all objects < std::min(peer_backfill_info[target].begin,
* backfill_info.begin) in PG are backfilled. No deleted objects in this
* interval remain on the backfill target.
*
* have been backfilled to target
*
* There *MAY* be missing/outdated objects between last_backfill_started and
- * MIN(peer_backfill_info[*].begin, backfill_info.begin) in the event that client
+ * std::min(peer_backfill_info[*].begin, backfill_info.begin) in the event that client
* io created objects since the last scan. For this reason, we call
* update_range() again before continuing backfill.
*/
if (obc->obs.oi.is_omap())
ctx->delta_stats.num_objects_omap--;
ctx->delta_stats.num_evict++;
- ctx->delta_stats.num_evict_kb += SHIFT_ROUND_UP(obc->obs.oi.size, 10);
+ ctx->delta_stats.num_evict_kb += shift_round_up(obc->obs.oi.size, 10);
if (obc->obs.oi.is_dirty())
--ctx->delta_stats.num_objects_dirty;
assert(r == 0);
uint64_t avg_size = num_user_bytes / num_user_objects;
dirty_micro =
num_dirty * avg_size * 1000000 /
- MAX(pool.info.target_max_bytes / divisor, 1);
+ std::max<uint64_t>(pool.info.target_max_bytes / divisor, 1);
full_micro =
num_user_objects * avg_size * 1000000 /
- MAX(pool.info.target_max_bytes / divisor, 1);
+ std::max<uint64_t>(pool.info.target_max_bytes / divisor, 1);
}
if (pool.info.target_max_objects > 0) {
uint64_t dirty_objects_micro =
num_dirty * 1000000 /
- MAX(pool.info.target_max_objects / divisor, 1);
+ std::max<uint64_t>(pool.info.target_max_objects / divisor, 1);
if (dirty_objects_micro > dirty_micro)
dirty_micro = dirty_objects_micro;
uint64_t full_objects_micro =
num_user_objects * 1000000 /
- MAX(pool.info.target_max_objects / divisor, 1);
+ std::max<uint64_t>(pool.info.target_max_objects / divisor, 1);
if (full_objects_micro > full_micro)
full_micro = full_objects_micro;
}
flush_target += flush_slop;
flush_high_target += flush_slop;
} else {
- flush_target -= MIN(flush_target, flush_slop);
- flush_high_target -= MIN(flush_high_target, flush_slop);
+ flush_target -= std::min(flush_target, flush_slop);
+ flush_high_target -= std::min(flush_high_target, flush_slop);
}
if (dirty_micro > flush_high_target) {
if (restart || agent_state->evict_mode == TierAgentState::EVICT_MODE_IDLE)
evict_target += evict_slop;
else
- evict_target -= MIN(evict_target, evict_slop);
+ evict_target -= std::min(evict_target, evict_slop);
if (full_micro > 1000000) {
// evict anything clean
evict_mode = TierAgentState::EVICT_MODE_SOME;
uint64_t over = full_micro - evict_target;
uint64_t span = 1000000 - evict_target;
- evict_effort = MAX(over * 1000000 / span,
- (unsigned)(1000000.0 * cct->_conf->osd_agent_min_evict_effort));
+ evict_effort = std::max(over * 1000000 / span,
+ uint64_t(1000000.0 *
+ cct->_conf->osd_agent_min_evict_effort));
// quantize effort to avoid too much reordering in the agent_queue.
uint64_t inc = cct->_conf->osd_agent_quantize_effort * 1000000;
if (!parent->should_send_op(peer, soid)) {
dout(10) << "issue_repop shipping empty opt to osd." << peer
<<", object " << soid
- << " beyond MAX(last_backfill_started "
+ << " beyond std::max(last_backfill_started "
<< ", pinfo.last_backfill "
<< pinfo.last_backfill << ")" << dendl;
ObjectStore::Transaction t;
encode(nspace, bl);
encode(hash, bl);
if (hash != -1)
- encode_compat = MAX(encode_compat, 6); // need to interpret the hash
+ encode_compat = std::max<std::uint8_t>(encode_compat, 6); // need to interpret the hash
ENCODE_FINISH_NEW_COMPAT(bl, encode_compat);
}
uint64_t stripeno = off / su + objectsetno * stripes_per_object;
uint64_t blockno = stripeno * stripe_count + stripepos;
uint64_t extent_off = blockno * su + off_in_block;
- uint64_t extent_len = MIN(len, su - off_in_block);
+ uint64_t extent_len = std::min(len, su - off_in_block);
extents.push_back(make_pair(extent_off, extent_len));
ldout(cct, 20) << " object " << off << "~" << extent_len
p != buffer_extents.end();
++p) {
pair<bufferlist, uint64_t>& r = partial[p->first];
- size_t actual = MIN(bl.length(), p->second);
+ size_t actual = std::min<uint64_t>(bl.length(), p->second);
bl.splice(0, actual, &r.first);
r.second = p->second;
total_intended_len += r.second;
if (s->first > bl_off) {
// gap in sparse read result
pair<bufferlist, uint64_t>& r = partial[tofs];
- size_t gap = MIN(s->first - bl_off, tlen);
+ size_t gap = std::min(s->first - bl_off, tlen);
ldout(cct, 20) << " s gap " << gap << ", skipping" << dendl;
r.second = gap;
total_intended_len += r.second;
assert(s->first <= bl_off);
size_t left = (s->first + s->second) - bl_off;
- size_t actual = MIN(left, tlen);
+ size_t actual = std::min(left, tlen);
if (actual > 0) {
ldout(cct, 20) << " s has " << actual << ", copying" << dendl;
}
int decompress(bufferlist::iterator &p, size_t compressed_len, bufferlist &out) override
{
- p.copy(MIN(p.get_remaining(), compressed_len), out);
+ p.copy(std::min<size_t>(p.get_remaining(), compressed_len), out);
return 0;
}
};
size_t left = out.length()-small_prefix_size;
size_t offs = small_prefix_size;
while( left > 0 ){
- size_t shard_size = MIN( 2048, left );
+ size_t shard_size = std::min<size_t>(2048, left);
tmp.substr_of(out, offs, shard_size );
out2.append( tmp );
left -= shard_size;
for (int i=0; i<1000; i++) {
bl.clear();
got.clear();
- bl.append(buffer::create(MIN(g_conf->osd_max_attr_size, 1024)));
+ bl.append(buffer::create(std::min<uint64_t>(g_conf->osd_max_attr_size,
+ 1024)));
char n[10];
snprintf(n, sizeof(n), "a%d", i);
ASSERT_EQ(0, ioctx.setxattr("foo", n, bl));
uint64_t dstoff = srcoff; //u1(*rng);
uint64_t len = u2(*rng);
if (write_alignment) {
- srcoff = ROUND_UP_TO(srcoff, write_alignment);
- dstoff = ROUND_UP_TO(dstoff, write_alignment);
- len = ROUND_UP_TO(len, write_alignment);
+ srcoff = round_up_to(srcoff, write_alignment);
+ dstoff = round_up_to(dstoff, write_alignment);
+ len = round_up_to(len, write_alignment);
}
if (srcoff > srcdata.length() - 1) {
uint64_t len = u2(*rng);
bufferlist bl;
if (write_alignment) {
- offset = ROUND_UP_TO(offset, write_alignment);
- len = ROUND_UP_TO(len, write_alignment);
+ offset = round_up_to(offset, write_alignment);
+ len = round_up_to(len, write_alignment);
}
filled_byte_array(bl, len);
boost::uniform_int<> choose(0, max_object_len);
size_t len = choose(*rng);
if (write_alignment) {
- len = ROUND_UP_TO(len, write_alignment);
+ len = round_up_to(len, write_alignment);
}
t.truncate(cid, obj, len);
uint64_t offset = u1(*rng);
uint64_t len = u2(*rng);
if (write_alignment) {
- offset = ROUND_UP_TO(offset, write_alignment);
- len = ROUND_UP_TO(len, write_alignment);
+ offset = round_up_to(offset, write_alignment);
+ len = round_up_to(len, write_alignment);
}
if (len > 0) {
for (uint64_t i = 0; i < num_ops; ++i) {
uint64_t offset = random() % max_obj_size;
- uint64_t max_len = MIN(max_obj_size - offset, max_op_len);
+ uint64_t max_len = std::min(max_obj_size - offset, max_op_len);
// no zero-length operations
- uint64_t length = random() % (MAX(max_len - 1, 1)) + 1;
+ uint64_t length = random() % (std::max<uint64_t>(max_len - 1, 1)) + 1;
std::string oid = "test" + stringify(random() % num_objs);
bool is_read = random() < percent_reads * RAND_MAX;
ceph::shared_ptr<op_data> op(new op_data(oid, offset, length, is_read));