parent.pool = pool;
parent.id = id;
parent.snapid = snapid;
- parent.overlap = MIN(our_size, size);
+ parent.overlap = std::min(our_size, size);
encode(parent, parentbl);
r = cls_cxx_map_set_val(hctx, "parent", &parentbl);
if (r < 0) {
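
Worth spelling out why this sweep is more than cosmetic. A function-like MIN macro expands its winning argument twice, so any side effect in that argument runs twice; std::min evaluates each argument exactly once. A minimal stand-alone sketch, not part of the patch (MIN_MACRO and sample() are hypothetical names):

#include <algorithm>
#include <cassert>
#include <cstdint>

#define MIN_MACRO(a, b) ((a) < (b) ? (a) : (b))

static int evals = 0;
static uint64_t sample() { ++evals; return 16; }

int main() {
  uint64_t m = MIN_MACRO(sample(), uint64_t(64));  // sample() runs twice
  assert(evals == 2 && m == 16);
  evals = 0;
  uint64_t s = std::min<uint64_t>(sample(), 64);   // runs exactly once
  assert(evals == 1 && s == 16);
  return 0;
}
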
while (more && data.size() < max_return) {
map<string, bufferlist> raw_data;
- int max_read = MIN(RBD_MAX_KEYS_READ, max_return - data.size());
+ int max_read = std::min<uint64_t>(RBD_MAX_KEYS_READ, max_return - data.size());
int r = cls_cxx_map_get_vals(hctx, last_read, RBD_METADATA_KEY_PREFIX,
max_read, &raw_data, &more);
if (r < 0) {
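
The explicit template argument matters here: unlike the old macro, std::min deduces a single type from both arguments, and an integer constant like RBD_MAX_KEYS_READ does not match the size_t arithmetic on the right, so deduction fails without it. A reduced sketch (kMaxKeysRead stands in for the real constant):

#include <algorithm>
#include <cstddef>
#include <cstdint>

int main() {
  const int kMaxKeysRead = 64;   // stand-in for RBD_MAX_KEYS_READ
  std::size_t remaining = 1000;  // e.g. max_return - data.size()

  // std::min(kMaxKeysRead, remaining);  // ill-formed: int vs std::size_t
  auto n = std::min<uint64_t>(kMaxKeysRead, remaining);  // both convert
  return n == 64 ? 0 : 1;
}
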
std::map<std::string, bufferlist> vals;
CLS_LOG(20, "last read: '%s'", last_read.c_str());
- int max_read = MIN(RBD_MAX_KEYS_READ, max_return - image_mapping->size());
+ int max_read = std::min<uint64_t>(RBD_MAX_KEYS_READ, max_return - image_mapping->size());
int r = cls_cxx_map_get_vals(hctx, last_read, MIRROR_IMAGE_MAP_KEY_PREFIX,
max_read, &vals, &more);
if (r < 0) {
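
Same fix as in metadata_list above; the min also doubles as the pagination budget, since each pass asks for at most the remaining budget so the final batch never overshoots max_return. A toy model of that loop (names illustrative, not the cls API):

#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
  const uint64_t max_return = 10, batch = 4;
  uint64_t collected = 0;
  bool more = true;
  while (more && collected < max_return) {
    uint64_t max_read = std::min<uint64_t>(batch, max_return - collected);
    collected += max_read;   // pretend the read returned max_read keys
    more = collected < 11;   // pretend the store holds 11 keys
    std::cout << "read " << max_read << std::endl;  // prints 4, 4, 2
  }
  return 0;
}
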
// size object cache appropriately
uint64_t obj = cache_max_dirty_object;
if (!obj) {
- obj = MIN(2000, MAX(10, cache_size / 100 / sizeof(ObjectCacher::Object)));
+ obj = std::min<uint64_t>(2000, std::max<uint64_t>(
+ 10, cache_size / 100 / sizeof(ObjectCacher::Object)));
}
ldout(cct, 10) << " cache bytes " << cache_size
<< " -> about " << obj << " objects" << dendl;
size_t conf_prefix_len = prefix.size();
for (auto it : pairs) {
- if (it.first.compare(0, MIN(conf_prefix_len, it.first.size()), prefix) > 0)
+ if (it.first.compare(0, std::min(conf_prefix_len, it.first.size()), prefix) > 0)
return false;
if (it.first.size() <= conf_prefix_len)
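
For context on the clamped length: compare(0, n, prefix) matches the first n characters of the key against the prefix, and clamping n to the key's own length preserves the old macro's semantics for keys shorter than the prefix. A stand-alone sketch of the test (past_prefix is a hypothetical name):

#include <algorithm>
#include <cassert>
#include <string>

// Compare only the first min(prefix_len, key_len) characters of the key.
static bool past_prefix(const std::string &key, const std::string &prefix) {
  return key.compare(0, std::min(prefix.size(), key.size()), prefix) > 0;
}

int main() {
  assert(!past_prefix("conf_a", "conf_"));  // still inside the prefix range
  assert(past_prefix("zzz", "conf_"));      // sorted past it: caller can stop
  return 0;
}
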
CephContext *cct = m_image_ctx.cct;
// break very large requests into manageable batches
- m_update_end_object_no = MIN(
+ m_update_end_object_no = std::min(
m_end_object_no, m_update_start_object_no + MAX_OBJECTS_PER_UPDATE);
std::string oid(ObjectMap<>::object_map_name(m_image_ctx.id, m_snap_id));
ldout(m_image_ctx.cct, 20) << dendl;
auto it = m_object_map.begin() +
- MIN(m_update_start_object_no, m_object_map.size());
+ std::min(m_update_start_object_no, m_object_map.size());
auto end_it = m_object_map.begin() +
- MIN(m_update_end_object_no, m_object_map.size());
+ std::min(m_update_end_object_no, m_object_map.size());
for (; it != end_it; ++it) {
auto state_ref = *it;
uint8_t state = state_ref;
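
Clamping both endpoints before forming the iterators is what keeps the half-open range valid: begin() + n with n past size() is undefined behavior, and the update window can extend beyond the end of the map. A reduced sketch using a vector as a stand-in for the object map:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<uint8_t> object_map(8, 1);  // stand-in for the object map
  uint64_t start = 6, end = 12;           // update window overshoots the end

  // Clamp both endpoints first so begin() + n stays in range.
  auto first = object_map.begin() + static_cast<std::ptrdiff_t>(
      std::min<uint64_t>(start, object_map.size()));
  auto last = object_map.begin() + static_cast<std::ptrdiff_t>(
      std::min<uint64_t>(end, object_map.size()));
  std::cout << (last - first) << " entries to update" << std::endl;  // 2
  return 0;
}
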
int BufferReader::fetch(bufferlist::iterator **it) {
if (m_bl_it.get_remaining() < m_min_bytes) {
- ssize_t bytes_to_read = ROUND_UP_TO(m_max_bytes - m_bl_it.get_remaining(),
+ ssize_t bytes_to_read = round_up_to(m_max_bytes - m_bl_it.get_remaining(),
CEPH_PAGE_SIZE);
while (!m_eof_reached && bytes_to_read > 0) {
int r = m_bl.read_fd(m_fd, CEPH_PAGE_SIZE);
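
round_up_to here pads the read size to whole pages. A stand-alone sketch of the rounding (illustrative only; the real helper lives in Ceph's headers):

#include <cassert>
#include <cstdint>

// Round n up to the next multiple of d (d > 0).
constexpr uint64_t round_up(uint64_t n, uint64_t d) {
  return n % d ? n + d - n % d : n;
}

int main() {
  const uint64_t page = 4096;  // stand-in for CEPH_PAGE_SIZE
  assert(round_up(1, page) == 4096);
  assert(round_up(4096, page) == 4096);
  assert(round_up(4097, page) == 8192);
  return 0;
}
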
C_SaferCond ctx;
librbd::NoOpProgressContext prog_ctx;
- size -= MIN(size, 1<<18);
+ size -= std::min<uint64_t>(size, 1 << 18);
{
RWLock::RLocker l(ictx->owner_lock);
ictx->operations->execute_resize(size, true, prog_ctx, &ctx, 0);
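
The surrounding test drives the async resize through a completion and blocks on it; C_SaferCond is the promise-like half of that handshake. A generic model of the pattern, with std::promise standing in for the completion context:

#include <future>
#include <iostream>
#include <thread>

int main() {
  std::promise<int> done;
  auto fut = done.get_future();

  // The async "resize": completes the promise from another thread,
  // the way the operation fires its completion context when finished.
  std::thread op([&done] { done.set_value(0); });

  int r = fut.get();  // blocks until completion, like waiting on the cond
  op.join();
  std::cout << "r=" << r << std::endl;
  return 0;
}
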
while (size > 0) {
uint64_t new_size = original_size;
if (attempts++ % 2 == 0) {
- size -= MIN(size, 1<<18);
+ size -= std::min<uint64_t>(size, 1 << 18);
new_size = size;
}
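
One more reason the std::min matters in these shrink loops: size is unsigned, so a bare size -= (1 << 18) would wrap around past zero on the last step; clamping the decrement to size lands exactly on zero. A quick check:

#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  uint64_t size = 600000;         // not a multiple of the step
  const uint64_t step = 1 << 18;  // 256 KiB, as in the loop above
  int iterations = 0;
  while (size > 0) {
    size -= std::min(size, step);  // final step clamps to size, no wraparound
    ++iterations;
  }
  assert(size == 0 && iterations == 3);  // 262144 + 262144 + 75712
  return 0;
}
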