} else {
utime_t next_ttl = now;
next_ttl += ttl;
- ek.expiration = MAX(next_ttl, r.next().expiration);
+ ek.expiration = std::max(next_ttl, r.next().expiration);
}
ek.expiration += ttl;
uint64_t secret_id = r.add(ek);
}
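
A brief aside on why these substitutions are safe and worth making: the legacy MIN/MAX macros expand to a conditional that evaluates the winning argument twice, while std::min/std::max evaluate each argument exactly once (but require both arguments to be the same type). A minimal standalone sketch; the macro shown is the conventional definition, assumed here rather than quoted from the tree:

  #include <algorithm>

  // Conventional macro form (assumed); it evaluates the winning argument twice.
  #define MAX(a, b) ((a) > (b) ? (a) : (b))

  int main() {
    int calls = 0;
    auto next = [&calls] { return ++calls; };
    int a = MAX(next(), 0);       // next() runs twice: a == 2, calls == 2
    calls = 0;
    int b = std::max(next(), 0);  // next() runs once: b == 1, calls == 1
    return a - b;                 // 1
  }
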
for (auto object_position : client.commit_position.object_positions) {
- minimum_tag_tid = MIN(minimum_tag_tid, object_position.tag_tid);
+ minimum_tag_tid = std::min(minimum_tag_tid, object_position.tag_tid);
}
}
if (!vals.empty()) {
}
for (auto object_position : client.commit_position.object_positions) {
- minimum_tag_tid = MIN(minimum_tag_tid, object_position.tag_tid);
+ minimum_tag_tid = std::min(minimum_tag_tid, object_position.tag_tid);
}
// compute minimum tags in use per-class
unsigned num_unsent = last_log - last_log_sent;
unsigned num_send;
if (cct->_conf->mon_client_max_log_entries_per_message > 0)
- num_send = MIN(num_unsent, (unsigned)cct->_conf->mon_client_max_log_entries_per_message);
+ num_send = std::min(num_unsent, (unsigned)cct->_conf->mon_client_max_log_entries_per_message);
else
num_send = num_unsent;
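
The (unsigned) cast kept above is what lets the std::min form compile: unlike the macro, which merely expands to a conditional, std::min/std::max deduce a single template parameter, so operands of different types need a cast or an explicit template argument. A minimal sketch with hypothetical names mirroring this call site:

  #include <algorithm>
  #include <cstdint>

  // Hypothetical helper: a 32-bit counter clamped by a 64-bit config value.
  unsigned clamp_to_limit(unsigned num_unsent, int64_t conf_limit) {
    // std::min(num_unsent, conf_limit);              // ill-formed: no single T deduced
    return std::min(num_unsent, (unsigned)conf_limit); // cast, as in the hunk above
    // alternatively: std::min<uint64_t>(num_unsent, conf_limit)
  }
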
m_readahead_pos = m_last_pos;
}
}
- m_readahead_size = MAX(m_readahead_size, m_readahead_min_bytes);
- m_readahead_size = MIN(m_readahead_size, m_readahead_max_bytes);
+ m_readahead_size = std::max(m_readahead_size, m_readahead_min_bytes);
+ m_readahead_size = std::min(m_readahead_size, m_readahead_max_bytes);
readahead_offset = m_readahead_pos;
readahead_length = m_readahead_size;
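
The max-then-min pair above is the usual clamp-to-[min, max] idiom; if the tree builds as C++17 or later it could also be written with std::clamp, assuming m_readahead_min_bytes <= m_readahead_max_bytes. A sketch with hypothetical parameter names:

  #include <algorithm>
  #include <cstdint>

  // C++17 sketch; equivalent to std::min(std::max(size, lo), hi) when lo <= hi.
  uint64_t clamp_readahead(uint64_t size, uint64_t lo, uint64_t hi) {
    return std::clamp(size, lo, hi);
  }
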
continue;
}
- T start = MAX(pa->first, pb->first);
- T en = MIN(pa->first+pa->second, pb->first+pb->second);
+ T start = std::max(pa->first, pb->first);
+ T en = std::min(pa->first+pa->second, pb->first+pb->second);
assert(en > start);
typename decltype(m)::value_type i{start, en - start};
mi = m.insert(mi, i);
ldout(m_cct, 0) << "client flagged disconnected: " << m_client_id
<< dendl;
}
- m_minimum_set = MAX(m_minimum_set, refresh->minimum_set);
- m_active_set = MAX(m_active_set, refresh->active_set);
+ m_minimum_set = std::max(m_minimum_set, refresh->minimum_set);
+ m_active_set = std::max(m_active_set, refresh->active_set);
m_registered_clients = refresh->registered_clients;
m_client = *it;
}
if (m_remove_set_pending) {
- m_remove_set = MAX(m_remove_set, minimum_set);
+ m_remove_set = std::max(m_remove_set, minimum_set);
return;
}
uint64_t bytes_remaining = length;
uint64_t event_offset = 0;
do {
- uint64_t event_length = MIN(bytes_remaining, max_write_data_size);
+ uint64_t event_length = std::min(bytes_remaining, max_write_data_size);
bufferlist event_bl;
event_bl.substr_of(bl, event_offset, event_length);
}
object_map.resize(num_objs);
- uint64_t overlap = MIN(object_map.size(), prev_object_map.size());
+ uint64_t overlap = std::min(object_map.size(), prev_object_map.size());
for (uint64_t i = 0; i < overlap; ++i) {
ldout(cct, 20) << __func__ << ": object state: " << i << " "
<< static_cast<uint32_t>(prev_object_map[i])
size_t offset = 0;
int idx = 0;
for (; offset < length && idx < vector.iov_count; idx++) {
- size_t len = MIN(vector.iov[idx].iov_len, length - offset);
+ size_t len = std::min(vector.iov[idx].iov_len, length - offset);
it.copy(len, static_cast<char *>(vector.iov[idx].iov_base));
offset += len;
}
RWLock::WLocker snap_locker(m_image_ctx->snap_lock);
m_snap_ids.push_back(CEPH_NOSNAP);
for (auto it : m_image_ctx->snap_info) {
- max_size = MAX(max_size, it.second.size);
+ max_size = std::max(max_size, it.second.size);
m_snap_ids.push_back(it.first);
}
if (image_ctx.parent == NULL) {
m_new_parent_overlap = 0;
} else {
- m_new_parent_overlap = MIN(m_new_size, image_ctx.parent_md.overlap);
+ m_new_parent_overlap = std::min(m_new_size, image_ctx.parent_md.overlap);
}
}
{
uint64_t period = image_ctx.get_stripe_period();
uint64_t new_num_periods = ((m_new_size + period - 1) / period);
- m_delete_off = MIN(new_num_periods * period, original_size);
+ m_delete_off = std::min(new_num_periods * period, original_size);
// first object we can delete free and clear
m_delete_start = new_num_periods * image_ctx.get_stripe_count();
m_delete_start_min = m_delete_start;
new_snap->past_parents[oldparentseq].ino = oldparent->inode->ino();
new_snap->past_parents[oldparentseq].first = new_snap->current_parent_since;
}
- new_snap->current_parent_since = MAX(oldparentseq, newparent->get_last_created()) + 1;
+ new_snap->current_parent_since = std::max(oldparentseq, newparent->get_last_created()) + 1;
}
}
snapid_t t = first;
if (!old_inodes.empty())
t = old_inodes.begin()->second.first;
- return MIN(t, oldest_snap);
+ return std::min(t, oldest_snap);
}
old_inode_t& CInode::cow_old_inode(snapid_t follows, bool cow_head)
// max_size is min of projected, actual
uint64_t max_size =
- MIN(oi->client_ranges.count(client) ?
+ std::min(oi->client_ranges.count(client) ?
oi->client_ranges[client].range.last : 0,
pi->client_ranges.count(client) ?
pi->client_ranges[client].range.last : 0);
// max_size is min of projected, actual.
uint64_t oldms = oi->client_ranges.count(client) ? oi->client_ranges[client].range.last : 0;
uint64_t newms = pi->client_ranges.count(client) ? pi->client_ranges[client].range.last : 0;
- m->max_size = MIN(oldms, newms);
+ m->max_size = std::min(oldms, newms);
i = pauth ? pi:oi;
m->head.mode = i->mode;
uint64_t max_inc = g_conf->mds_client_writeable_range_max_inc_objs;
if (max_inc > 0) {
max_inc *= pi->get_layout_size_increment();
- new_max = MIN(new_max, size + max_inc);
+ new_max = std::min(new_max, size + max_inc);
}
return ROUND_UP_TO(new_max, pi->get_layout_size_increment());
}
client_writeable_range_t& oldr = latest->client_ranges[p->first];
if (ms > oldr.range.last)
*max_increased = true;
- nr.range.last = MAX(ms, oldr.range.last);
+ nr.range.last = std::max(ms, oldr.range.last);
nr.follows = oldr.follows;
} else {
*max_increased = true;
bool max_increased = false;
if (update_size) {
- new_size = size = MAX(size, new_size);
- new_mtime = MAX(new_mtime, latest->mtime);
+ new_size = size = std::max(size, new_size);
+ new_mtime = std::max(new_mtime, latest->mtime);
if (latest->size == new_size && latest->mtime == new_mtime)
update_size = false;
}
if (cur->last != CEPH_NOSNAP) {
assert(cur->dirty_old_rstats.empty());
- set<snapid_t>::const_iterator q = snaps.lower_bound(MAX(first, floor));
+ set<snapid_t>::const_iterator q = snaps.lower_bound(std::max(first, floor));
if (q == snaps.end() || *q > cur->last)
return;
}
assert(cur->is_frozen_inode());
update = false;
}
- _project_rstat_inode_to_frag(*curi, MAX(first, floor), cur->last, parent,
+ _project_rstat_inode_to_frag(*curi, std::max(first, floor), cur->last, parent,
linkunlink, update);
}
p != cur->dirty_old_rstats.end();
++p) {
old_inode_t& old = cur->old_inodes[*p];
- snapid_t ofirst = MAX(old.first, floor);
+ snapid_t ofirst = std::max(old.first, floor);
set<snapid_t>::const_iterator q = snaps.lower_bound(ofirst);
if (q == snaps.end() || *q > *p)
continue;
fnode_t *pf = parent->get_projected_fnode();
if (last == CEPH_NOSNAP) {
if (g_conf->mds_snap_rstat)
- first = MAX(ofirst, parent->first);
+ first = std::max(ofirst, parent->first);
else
first = parent->first;
prstat = &pf->rstat;
snapid_t first;
if (last == pin->last) {
pi = pin->get_projected_inode();
- first = MAX(ofirst, pin->first);
+ first = std::max(ofirst, pin->first);
if (first > pin->first) {
old_inode_t& old = pin->cow_old_inode(first-1, cow_head);
dout(20) << " cloned old_inode rstat is " << old.inode.rstat << dendl;
const uint64_t num = (item.size > 0) ?
Striper::get_num_objects(item.layout, item.size) : 1;
- ops_required = MIN(num, g_conf->filer_max_purge_ops);
+ ops_required = std::min(num, g_conf->filer_max_purge_ops);
// Account for removing (or zeroing) backtrace
ops_required += 1;
// User may also specify a hard limit, apply this if so.
if (cct->_conf->mds_max_purge_ops) {
- max_purge_ops = MIN(max_purge_ops, cct->_conf->mds_max_purge_ops);
+ max_purge_ops = std::min(max_purge_ops, cct->_conf->mds_max_purge_ops);
}
}
dout(10) << " current parent [" << srnode.current_parent_since << ",head] is " << *parent
<< " on " << *parent->inode << dendl;
if (last >= srnode.current_parent_since &&
- !parent->_open_parents(finish, MAX(first, srnode.current_parent_since), last))
+ !parent->_open_parents(finish, std::max(first, srnode.current_parent_since), last))
return false;
}
return false;
}
SnapRealm *parent_realm = open_past_parents[p->second.ino].first;
- if (!parent_realm->have_past_parents_open(MAX(first, p->second.first),
- MIN(last, p->first)))
+ if (!parent_realm->have_past_parents_open(std::max(first, p->second.first),
+ std::min(last, p->first)))
return false;
}
assert(oldparent); // call open_parents first!
assert(oldparent->snaprealm);
oldparent->snaprealm->build_snap_set(s, max_seq, max_last_created, max_last_destroyed,
- MAX(first, p->second.first),
- MIN(last, p->first));
+ std::max(first, p->second.first),
+ std::min(last, p->first));
}
if (srnode.current_parent_since <= last && parent)
parent->build_snap_set(s, max_seq, max_last_created, max_last_destroyed,
- MAX(first, srnode.current_parent_since), last);
+ std::max(first, srnode.current_parent_since), last);
}
assert(oldparent); // call open_parents first!
assert(oldparent->snaprealm);
oldparent->snaprealm->get_snap_info(infomap,
- MAX(first, p->second.first),
- MIN(last, p->first));
+ std::max(first, p->second.first),
+ std::min(last, p->first));
}
if (srnode.current_parent_since <= last && parent)
- parent->get_snap_info(infomap, MAX(first, srnode.current_parent_since), last);
+ parent->get_snap_info(infomap, std::max(first, srnode.current_parent_since), last);
}
const string& SnapRealm::get_snapname(snapid_t snapid, inodeno_t atino)
assert(oldparent); // call open_parents first!
assert(oldparent->snaprealm);
snapid_t r = oldparent->snaprealm->resolve_snapname(n, atino,
- MAX(first, p->second.first),
- MIN(last, p->first));
+ std::max(first, p->second.first),
+ std::min(last, p->first));
if (r)
return r;
}
if (parent && srnode.current_parent_since <= last)
- return parent->resolve_snapname(n, atino, MAX(first, srnode.current_parent_since), last);
+ return parent->resolve_snapname(n, atino, std::max(first, srnode.current_parent_since), last);
return 0;
}
uint64_t to = 0;
if (in->is_file()) {
to = in->inode.get_max_size();
- to = MAX(in->inode.size, to);
+ to = std::max(in->inode.size, to);
// when truncating a file, the filer does not delete stripe objects that are
// truncated to zero. so we need to purge stripe objects up to the max size
// the file has ever been.
- to = MAX(in->inode.max_size_ever, to);
+ to = std::max(in->inode.max_size_ever, to);
}
inode_t *pi = in->get_projected_inode();
const SnapContext *snapc = &realm->get_snap_context();
uint64_t to = in->inode.get_max_size();
- to = MAX(in->inode.size, to);
+ to = std::max(in->inode.size, to);
// when truncating a file, the filer does not delete stripe objects that are
// truncated to zero. so we need to purge stripe objects up to the max size
// the file has ever been.
- to = MAX(in->inode.max_size_ever, to);
+ to = std::max(in->inode.max_size_ever, to);
assert(to > 0);
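
As the comment above explains, truncation does not remove stripe objects that were merely truncated to zero, so the purge range must cover the largest of the current size, the client-writeable max size, and the historical maximum. A small sketch restating that computation with hypothetical field names:

  #include <algorithm>
  #include <cstdint>

  // Purge upper bound: the high-water mark of the file, not just its current size.
  uint64_t purge_to(uint64_t cur_size, uint64_t client_max_size, uint64_t max_size_ever) {
    uint64_t to = client_max_size;
    to = std::max(cur_size, to);
    to = std::max(max_size_ever, to);
    return to;
  }
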
// getting beacons through recently.
utime_t latest_beacon;
for (const auto & i : last_beacon) {
- latest_beacon = MAX(i.second.stamp, latest_beacon);
+ latest_beacon = std::max(i.second.stamp, latest_beacon);
}
const bool may_replace = latest_beacon >
(ceph_clock_now() -
- MAX(g_conf->mds_beacon_interval, g_conf->mds_beacon_grace * 0.5));
+ std::max(g_conf->mds_beacon_interval, g_conf->mds_beacon_grace * 0.5));
// are we in?
// and is there a non-laggy standby that can take over for us?
utime_t now = ceph_clock_now();
utime_t cutoff = now;
- cutoff -= MIN(30.0, cct->_conf->auth_service_ticket_ttl / 4.0);
+ cutoff -= std::min(30.0, cct->_conf->auth_service_ticket_ttl / 4.0);
utime_t issued_at_lower_bound = now;
issued_at_lower_bound -= cct->_conf->auth_service_ticket_ttl;
if (!rotating_secrets->need_new_secrets(cutoff)) {
sync_stash_critical_state(t);
t->put("mon_sync", "in_sync", 1);
- sync_last_committed_floor = MAX(sync_last_committed_floor, paxos->get_version());
+ sync_last_committed_floor = std::max(sync_last_committed_floor, paxos->get_version());
dout(10) << __func__ << " marking sync in progress, storing sync_last_committed_floor "
<< sync_last_committed_floor << dendl;
t->put("mon_sync", "last_committed_floor", sync_last_committed_floor);
}
// pick new pn
- accepted_pn = get_new_proposal_number(MAX(accepted_pn, oldpn));
+ accepted_pn = get_new_proposal_number(std::max(accepted_pn, oldpn));
accepted_pn_from = last_committed;
num_last = 1;
dout(10) << "collect with pn " << accepted_pn << dendl;
void Paxos::trim()
{
assert(should_trim());
- version_t end = MIN(get_version() - g_conf->paxos_min,
+ version_t end = std::min(get_version() - g_conf->paxos_min,
get_first_committed() + g_conf->paxos_trim_max);
if (first_committed >= end)
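
For the trim bound computed just above, a worked example may help: end is the smaller of "everything except the newest paxos_min versions" and "first_committed plus at most paxos_trim_max versions per pass". Illustrative numbers only, with the semantics inferred from the lines above:

  #include <algorithm>
  #include <cstdint>

  // end = min(version - min_keep, first_committed + trim_max)
  uint64_t trim_end(uint64_t version, uint64_t first_committed,
                    uint64_t min_keep, uint64_t trim_max) {
    return std::min(version - min_keep, first_committed + trim_max);
  }
  // e.g. trim_end(1000, 300, 500, 100) == std::min(500, 400) == 400:
  // this pass trims at most 100 versions and keeps at least the newest 500.
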
// space for next ptr.
if (len > 0)
break;
- seglen = MIN(seglen, available);
+ seglen = std::min(seglen, available);
}
len += seglen;
frags.push_back(fragment{(char*)pb->c_str(), seglen});
pseudo_vector fragments() { return { frags, _nr_frags }; }
static std::unique_ptr<impl> allocate(size_t nr_frags) {
- nr_frags = MAX(nr_frags, default_nr_frags);
+ nr_frags = std::max(nr_frags, default_nr_frags);
return std::unique_ptr<impl>(new (nr_frags) impl(nr_frags));
}
* split due to coalescing of a segment (front, middle,
* data) boundary */
- take_len = MIN(blen, msg_iov->iov_len);
+ take_len = std::min(blen, msg_iov->iov_len);
payload.append(
buffer::create_msg(
take_len, (char*) msg_iov->iov_base, m_hook));
iovs = vmsg_sglist(&tmsg->in);
for (; blen && (ix < iov_len); ++ix) {
msg_iov = &iovs[ix];
- take_len = MIN(blen, msg_iov->iov_len);
+ take_len = std::min(blen, msg_iov->iov_len);
middle.append(
buffer::create_msg(
take_len, (char*) msg_iov->iov_base, m_hook));
while (num_blocks) {
bit = start_block % BmapEntry::size();
bmap = &m_bmap_vec[start_block / BmapEntry::size()];
- falling_in_bmap = MIN(num_blocks, BmapEntry::size() - bit);
+ falling_in_bmap = std::min(num_blocks, BmapEntry::size() - bit);
if (!bmap->is_allocated(bit, falling_in_bmap)) {
return false;
while (blks) {
bit = start_block % BmapEntry::size();
bmap = &m_bmap_vec[start_block / BmapEntry::size()];
- falling_in_bmap = MIN(blks, BmapEntry::size() - bit);
+ falling_in_bmap = std::min(blks, BmapEntry::size() - bit);
bmap->set_bits(bit, falling_in_bmap);
while (count) {
bit = first_blk % BmapEntry::size();
bmap = &m_bmap_vec[first_blk / BmapEntry::size()];
- falling_in_bmap = MIN(count, BmapEntry::size() - bit);
+ falling_in_bmap = std::min(count, BmapEntry::size() - bit);
bmap->clear_bits(bit, falling_in_bmap);
start_block / m_child_size_blocks));
area_block_offset = start_block % m_child_size_blocks;
- falling_in_area = MIN(m_child_size_blocks - area_block_offset,
+ falling_in_area = std::min(m_child_size_blocks - area_block_offset,
num_blocks);
if (!area->is_allocated(area_block_offset, falling_in_area)) {
return false;
start_blk / m_child_size_blocks));
child_block_offset = start_blk % child->size();
- falling_in_child = MIN(m_child_size_blocks - child_block_offset,
+ falling_in_child = std::min(m_child_size_blocks - child_block_offset,
blks);
child->set_blocks_used(child_block_offset, falling_in_child);
start_blk += falling_in_child;
child_block_offset = start_block % m_child_size_blocks;
- falling_in_child = MIN(m_child_size_blocks - child_block_offset,
+ falling_in_child = std::min(m_child_size_blocks - child_block_offset,
num_blocks);
child->free_blocks(child_block_offset, falling_in_child);
start_block += falling_in_child;
child_block_offset = start_block % m_child_size_blocks;
- falling_in_child = MIN(m_child_size_blocks - child_block_offset,
+ falling_in_child = std::min(m_child_size_blocks - child_block_offset,
num_blocks);
child->lock_excl();
<< " alloc_unit 0x" << alloc_unit
<< " hint 0x" << hint << std::dec
<< dendl;
- uint64_t want = MAX(alloc_unit, want_size);
+ uint64_t want = std::max(alloc_unit, want_size);
int bin = _choose_bin(want);
int orig_bin = bin;
if (skew)
skew = alloc_unit - skew;
*offset = p.get_start() + skew;
- *length = MIN(MAX(alloc_unit, want_size), P2ALIGN((p.get_len() - skew), alloc_unit));
+ *length = std::min(std::max(alloc_unit, want_size), P2ALIGN((p.get_len() - skew), alloc_unit));
if (cct->_conf->bluestore_debug_small_allocations) {
uint64_t max =
alloc_unit * (rand() % cct->_conf->bluestore_debug_small_allocations);
ExtentList block_list = ExtentList(extents, 1, max_alloc_size);
while (allocated_size < want_size) {
- res = allocate_int(MIN(max_alloc_size, (want_size - allocated_size)),
+ res = allocate_int(std::min(max_alloc_size, (want_size - allocated_size)),
alloc_unit, hint, &offset, &length);
if (res != 0) {
/*
Mutex::Locker l(apply_lock);
dout(10) << "op_apply_finish " << op << " open_ops " << open_ops << " -> "
<< (open_ops-1) << ", max_applied_seq " << max_applied_seq << " -> "
- << MAX(op, max_applied_seq) << dendl;
+ << std::max(op, max_applied_seq) << dendl;
--open_ops;
assert(open_ops >= 0);
CollectionRef oc = get_collection(ocid);
if (!oc)
return -ENOENT;
- RWLock::WLocker l1(MIN(&(*c), &(*oc))->lock);
+ RWLock::WLocker l1(std::min(&(*c), &(*oc))->lock);
- RWLock::WLocker l2(MAX(&(*c), &(*oc))->lock);
+ RWLock::WLocker l2(std::max(&(*c), &(*oc))->lock);
if (c->object_hash.count(oid))
CollectionRef dc = get_collection(dest);
if (!dc)
return -ENOENT;
- RWLock::WLocker l1(MIN(&(*sc), &(*dc))->lock);
+ RWLock::WLocker l1(std::min(&(*sc), &(*dc))->lock);
- RWLock::WLocker l2(MAX(&(*sc), &(*dc))->lock);
+ RWLock::WLocker l2(std::max(&(*sc), &(*dc))->lock);
map<ghobject_t,ObjectRef>::iterator p = sc->object_map.begin();
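
The two collection-rename sites above take the pair of collection locks in pointer order (min first, then max) so that any two threads locking the same pair always acquire them in the same order and cannot deadlock. Strictly speaking, raw operator< on unrelated pointers yields an unspecified result, so std::less is the conforming way to get that total order. A self-contained sketch; types and names are illustrative, not the ones in the tree, and it assumes the two collections are distinct:

  #include <functional>
  #include <mutex>

  struct Coll { std::mutex lock; };

  // Acquire the locks of two distinct collections in a globally consistent
  // (address) order; std::less<Coll*> guarantees a strict total order even
  // where raw operator< on unrelated pointers is only unspecified.
  void lock_pair(Coll *a, Coll *b) {
    Coll *first  = std::less<Coll*>{}(a, b) ? a : b;
    Coll *second = (first == a) ? b : a;
    std::lock_guard<std::mutex> l1(first->lock);
    std::lock_guard<std::mutex> l2(second->lock);
    // ... work on both collections while both locks are held ...
  }
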
auto dst_iter = dst_pages.begin();
for (auto &src_page : tls_pages) {
- auto sbegin = MAX(srcoff, src_page->offset);
+ auto sbegin = std::max(srcoff, src_page->offset);
auto send = std::min(srcoff + count, src_page->offset + src_page_size);
// zero-fill holes before src_page
if (srcoff < sbegin) {
while (dst_iter != dst_pages.end()) {
auto &dst_page = *dst_iter;
- auto dbegin = MAX(srcoff + delta, dst_page->offset);
+ auto dbegin = std::max(srcoff + delta, dst_page->offset);
auto dend = std::min(sbegin + delta, dst_page->offset + dst_page_size);
std::fill(dst_page->data + dbegin - dst_page->offset,
dst_page->data + dend - dst_page->offset, 0);
// copy data from src page to dst pages
while (dst_iter != dst_pages.end()) {
auto &dst_page = *dst_iter;
- auto dbegin = MAX(sbegin + delta, dst_page->offset);
+ auto dbegin = std::max(sbegin + delta, dst_page->offset);
auto dend = std::min(send + delta, dst_page->offset + dst_page_size);
std::copy(src_page->data + (dbegin - delta) - src_page->offset,
if (count > 0) {
while (dst_iter != dst_pages.end()) {
auto &dst_page = *dst_iter;
- auto dbegin = MAX(dstoff, dst_page->offset);
+ auto dbegin = std::max(dstoff, dst_page->offset);
auto dend = std::min(dstoff + count, dst_page->offset + dst_page_size);
std::fill(dst_page->data + dbegin - dst_page->offset,
dst_page->data + dend - dst_page->offset, 0);
op->delta_stats = delta_stats;
op->version = at_version;
op->trim_to = trim_to;
- op->roll_forward_to = MAX(roll_forward_to, committed_to);
+ op->roll_forward_to = std::max(roll_forward_to, committed_to);
op->log_entries = log_entries;
std::swap(op->updated_hit_set_history, hset_history);
op->on_local_applied_sync = on_local_applied_sync;
trimmed.substr_of(
bl,
read.get<0>() - adjusted.first,
- MIN(read.get<1>(),
+ std::min(read.get<1>(),
bl.length() - (read.get<0>() - adjusted.first)));
result.insert(
read.get<0>(), trimmed.length(), std::move(trimmed));
extent *ext = &*p;
++p;
- uint64_t extoff = MAX(ext->offset, offset);
- uint64_t extlen = MIN(
+ uint64_t extoff = std::max(ext->offset, offset);
+ uint64_t extlen = std::min(
ext->length - (extoff - ext->offset),
offset + length - extoff);
bool any = false;
for (auto& p : job->osdmap->get_pools()) {
for (unsigned ps = 0; ps < p.second.get_pg_num(); ps += pgs_per_item) {
- unsigned ps_end = MIN(ps + pgs_per_item, p.second.get_pg_num());
+ unsigned ps_end = std::min(ps + pgs_per_item, p.second.get_pg_num());
job->start_one();
wq.queue(new Item(job, p.first, ps, ps_end));
ldout(cct, 20) << __func__ << " " << job << " " << p.first << " [" << ps
static pair<epoch_t, epoch_t> get_required_past_interval_bounds(
const pg_info_t &info,
epoch_t oldest_map) {
- epoch_t start = MAX(
+ epoch_t start = std::max(
info.history.last_epoch_clean ? info.history.last_epoch_clean :
info.history.epoch_pool_created,
oldest_map);
- epoch_t end = MAX(
+ epoch_t end = std::max(
info.history.same_interval_since,
info.history.epoch_pool_created);
return make_pair(start, end);
// version
// note: this is sorta busted, but should only be used for dirty buffers
- left->last_write_tid = MAX( left->last_write_tid, right->last_write_tid );
- left->last_write = MAX( left->last_write, right->last_write );
+ left->last_write_tid = std::max( left->last_write_tid, right->last_write_tid );
+ left->last_write = std::max( left->last_write, right->last_write );
left->set_dontneed(right->get_dontneed() ? left->get_dontneed() : false);
left->set_nocache(right->get_nocache() ? left->get_nocache() : false);
if (p->first <= cur) {
// have part of it
- loff_t lenfromcur = MIN(p->second->end() - cur, left);
+ loff_t lenfromcur = std::min(p->second->end() - cur, left);
cur += lenfromcur;
left -= lenfromcur;
++p;
ceph_abort();
}
- loff_t lenfromcur = MIN(e->end() - cur, left);
+ loff_t lenfromcur = std::min(e->end() - cur, left);
cur += lenfromcur;
left -= lenfromcur;
++p;
// gap.. miss
loff_t next = p->first;
BufferHead *n = new BufferHead(this);
- loff_t len = MIN(next - cur, left);
+ loff_t len = std::min(next - cur, left);
n->set_start(cur);
n->set_length(len);
oc->bh_add(this,n);
missing[cur] = n;
ldout(oc->cct, 20) << "map_read gap " << *n << dendl;
}
- cur += MIN(left, n->length());
- left -= MIN(left, n->length());
+ cur += std::min(left, n->length());
+ left -= std::min(left, n->length());
continue; // more?
} else {
ceph_abort();
} else {
// gap!
loff_t next = p->first;
- loff_t glen = MIN(next - cur, max);
+ loff_t glen = std::min(next - cur, max);
ldout(oc->cct, 10) << "map_write gap " << cur << "~" << glen << dendl;
if (final) {
oc->bh_stat_sub(final);
if (success) {
ldout(cct, 10) << "readx missed, waiting on cache to complete "
<< waitfor_read.size() << " blocked reads, "
- << (MAX(rx_bytes, max_size) - max_size)
+ << (std::max(rx_bytes, max_size) - max_size)
<< " read bytes" << dendl;
waitfor_read.push_back(new C_RetryRead(this, rd, oset, onfinish,
*trace));
BufferHead *bh = bh_it->second;
assert(opos == (loff_t)(bh->start() + bhoff));
- uint64_t len = MIN(f_it->second - foff, bh->length() - bhoff);
+ uint64_t len = std::min(f_it->second - foff, bh->length() - bhoff);
ldout(cct, 10) << "readx rmap opos " << opos << ": " << *bh << " +"
<< bhoff << " frag " << f_it->first << "~"
<< f_it->second << " +" << foff << "~" << len
ceph::coarse_mono_time stamp = info->watch_valid_thru;
if (!info->watch_pending_async.empty())
- stamp = MIN(info->watch_valid_thru, info->watch_pending_async.front());
+ stamp = std::min(info->watch_valid_thru, info->watch_pending_async.front());
auto age = ceph::coarse_mono_clock::now() - stamp;
ldout(cct, 10) << __func__ << " " << info->linger_id
good_buf = NULL;
ret = posix_memalign((void **)&good_buf,
- MAX(writebdy, (int)sizeof(void *)),
+ std::max(writebdy, (int)sizeof(void *)),
file_info.st_size);
if (ret > 0) {
prterrcode("check_clone: posix_memalign(good_buf)", -ret);
temp_buf = NULL;
ret = posix_memalign((void **)&temp_buf,
- MAX(readbdy, (int)sizeof(void *)),
+ std::max(readbdy, (int)sizeof(void *)),
file_info.st_size);
if (ret > 0) {
prterrcode("check_clone: posix_memalign(temp_buf)", -ret);
original_buf[i] = get_random() % 256;
ret = posix_memalign((void **)&good_buf,
- MAX(writebdy, (int)sizeof(void *)), maxfilelen);
+ std::max(writebdy, (int)sizeof(void *)), maxfilelen);
if (ret > 0) {
if (ret == EINVAL)
prt("writebdy is not a suitable power of two\n");
memset(good_buf, '\0', maxfilelen);
ret = posix_memalign((void **)&temp_buf,
- MAX(readbdy, (int)sizeof(void *)), maxfilelen);
+ std::max(readbdy, (int)sizeof(void *)), maxfilelen);
if (ret > 0) {
if (ret == EINVAL)
prt("readbdy is not a suitable power of two\n");
}
}
- epoch_t start_full = MAX(osdmap.get_epoch() + 1, first);
+ epoch_t start_full = std::max(osdmap.get_epoch() + 1, first);
if (m->maps.size() > 0) {
map<epoch_t,bufferlist>::reverse_iterator rit;
ExtentList *block_list = new ExtentList(&extents, blk_size, alloc_size);
for (int64_t i = 0; i < total_blocks; i += alloc_size) {
bmap_test_assert(alloc->reserve_blocks(alloc_size) == true);
- allocated = alloc->alloc_blocks_dis_res(alloc_size, MIN(alloc_size, zone_size),
+ allocated = alloc->alloc_blocks_dis_res(alloc_size, std::min(alloc_size, zone_size),
0, block_list);
bmap_test_assert(alloc_size == allocated);
bmap_test_assert(block_list->get_extent_count() ==
<< "expected 0x" << expected.length() << " != actual 0x"
<< actual.length() << std::dec << dendl;
}
- auto len = MIN(expected.length(), actual.length());
+ auto len = std::min(expected.length(), actual.length());
while ( first<len && expected[first] == actual[first])
++first;
unsigned last = len;
void add_dups(uint a, uint b) {
log.dups.push_back(create_dup_entry(a, b));
- write_from_dups = MIN(write_from_dups, log.dups.back().version);
+ write_from_dups = std::min(write_from_dups, log.dups.back().version);
}
void add_dups(const std::vector<pg_log_dup_t>& l) {
for (auto& i : l) {
log.dups.push_back(i);
- write_from_dups = MIN(write_from_dups, log.dups.back().version);
+ write_from_dups = std::min(write_from_dups, log.dups.back().version);
}
}
const bufferlist& obj_bl = obj_i->second;
dout(1) << "reading " << oid << " from total size " << obj_bl.length() << dendl;
- uint64_t read_len = MIN(len, obj_bl.length()-off);
+ uint64_t read_len = std::min(len, obj_bl.length()-off);
data_bl->substr_of(obj_bl, off, read_len);
return 0;
}
void scribble(librbd::ImageCtx *image_ctx, int num_ops, size_t max_size)
{
- max_size = MIN(image_ctx->size, max_size);
+ max_size = std::min(image_ctx->size, max_size);
for (int i=0; i<num_ops; i++) {
uint64_t off = rand() % (image_ctx->size - max_size + 1);
uint64_t len = 1 + rand() % max_size;
rgw_raw_obj test_raw = rgw_obj_select(*iter).get_raw_obj(env.zonegroup, env.zone_params);
ASSERT_TRUE(obj == test_raw);
- ofs = MIN(ofs + gen->cur_stripe_max_size(), obj_size);
+ ofs = std::min(ofs + gen->cur_stripe_max_size(), obj_size);
gen->create_next(ofs);
cout << "obj=" << obj << " *iter=" << *iter << std::endl;
}
ASSERT_TRUE(iter == test_objs->end());
ASSERT_EQ(manifest->get_obj_size(), obj_size);
- ASSERT_EQ(manifest->get_head_size(), MIN(obj_size, head_max_size));
+ ASSERT_EQ(manifest->get_head_size(), std::min(obj_size, head_max_size));
ASSERT_EQ(manifest->has_tail(), (obj_size > head_max_size));
}
+ (i % guessed_layout.stripe_count)
* guessed_layout.stripe_unit + (osize - 1)
% guessed_layout.stripe_unit + 1;
- incomplete_size = MAX(incomplete_size, upper_size);
+ incomplete_size = std::max(incomplete_size, upper_size);
}
} else if (r == -ENOENT) {
// Absent object, treat as size 0 and ignore.