- runtime
validator: |
[](std::string *value, std::string *error_message) {
- stringstream ss;
+ std::stringstream ss;
uint64_t features = librbd::rbd_features_from_string(*value, &ss);
// Leave this in integer form to avoid breaking Cinder. Someday
// we would like to present this in string form instead...
- runtime
validator: |
[](std::string *value, std::string *error_message) {
- ostringstream ss;
+ std::ostringstream ss;
uint64_t exclude_ops = librbd::io::rbd_io_operations_from_string(*value, &ss);
// Leave this in integer form to avoid breaking Cinder. Someday
// we would like to present this in string form instead...
: block_start(block_start), block_end(block_end) {
}
- friend ostream& operator<< (ostream& os, const BlockExtent& block_extent) {
+ friend std::ostream& operator<< (std::ostream& os, const BlockExtent& block_extent) {
os << "[block_start = " << block_extent.block_start << ", "
<< "block_end = " << block_extent.block_end << ")";
return os;
}
};
- static const string METADATA_CONF_PREFIX;
+ static const std::string METADATA_CONF_PREFIX;
CephContext *cct;
ConfigProxy config;
uint64_t get_current_size() const;
uint64_t get_object_size() const;
- string get_object_name(uint64_t num) const;
+ std::string get_object_name(uint64_t num) const;
uint64_t get_stripe_unit() const;
uint64_t get_stripe_count() const;
uint64_t get_stripe_period() const;
int get_parent_overlap(librados::snap_t in_snap_id,
uint64_t *overlap) const;
void register_watch(Context *on_finish);
- uint64_t prune_parent_extents(vector<pair<uint64_t,uint64_t> >& objectx,
+ uint64_t prune_parent_extents(std::vector<std::pair<uint64_t,uint64_t> >& objectx,
uint64_t overlap);
void cancel_async_requests();
std::uniform_int_distribution<uint32_t> distribution{0, 0xFFFFFFFF};
uint32_t extra = distribution(generator);
- ostringstream bid_ss;
+ std::ostringstream bid_ss;
bid_ss << std::hex << bid << std::hex << extra;
std::string id = bid_ss.str();
void unblock_notifies();
std::string get_oid() const;
- void set_oid(const string& oid);
+ void set_oid(const std::string& oid);
uint64_t get_watch_handle() const {
std::shared_lock watch_locker{m_watch_lock};
while (left > 0) {
uint64_t period_off = off - (off % period);
- uint64_t read_len = min(period_off + period - off, left);
+ uint64_t read_len = std::min(period_off + period - off, left);
if (fast_diff_enabled) {
// map to extents
- map<object_t,vector<ObjectExtent> > object_extents;
+ std::map<object_t,std::vector<ObjectExtent> > object_extents;
Striper::file_to_extents(cct, m_image_ctx.format_string,
&m_image_ctx.layout, off, read_len, 0,
object_extents, 0);
uint64_t bid = rados.get_instance_id();
uint32_t extra = rand() % 0xFFFFFFFF;
- ostringstream bid_ss;
+ std::ostringstream bid_ss;
bid_ss << std::hex << bid << std::hex << extra;
return bid_ss.str();
}
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Image: " << __func__ << ": "
+using std::map;
+using std::string;
using librados::snap_t;
namespace librbd {
};
std::unique_lock image_locker{ictx->image_lock};
- map<librados::snap_t, SnapInfo> snap_info = ictx->snap_info;
+ std::map<librados::snap_t, SnapInfo> snap_info = ictx->snap_info;
for (auto &info : snap_info) {
cls::rbd::ParentImageSpec parent_spec{ictx->md_ctx.get_id(),
ictx->md_ctx.get_namespace(),
}
if (next_mirror_mode == cls::rbd::MIRROR_MODE_POOL) {
- map<string, string> images;
+ std::map<std::string, std::string> images;
r = Image<I>::list_images_v2(io_ctx, &images);
if (r < 0) {
lderr(cct) << "failed listing images: " << cpp_strerror(r) << dendl;
return r;
}
- vector<string> names;
+ std::vector<std::string> names;
r = Namespace<I>::list(io_ctx, &names);
if (r < 0) {
return r;
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
int r;
- map<string, string> id_to_name;
+ std::map<std::string, std::string> id_to_name;
{
- map<string, string> name_to_id;
+ std::map<std::string, std::string> name_to_id;
r = Image<I>::list_images_v2(io_ctx, &name_to_id);
if (r < 0) {
return r;
}
}
- map<std::string, cls::rbd::MirrorImage> images_;
- map<std::string, cls::rbd::MirrorImageStatus> statuses_;
+ std::map<std::string, cls::rbd::MirrorImage> images_;
+ std::map<std::string, cls::rbd::MirrorImageStatus> statuses_;
r = librbd::cls_client::mirror_image_status_list(&io_ctx, start_id, max,
&images_, &statuses_);
entries->clear();
while (entries->size() < max) {
- map<std::string, cls::rbd::MirrorImage> images;
- map<std::string, cls::rbd::MirrorImageStatus> statuses;
+ std::map<std::string, cls::rbd::MirrorImage> images;
+ std::map<std::string, cls::rbd::MirrorImageStatus> statuses;
int r = librbd::cls_client::mirror_image_status_list(&io_ctx, last_read,
max, &images,
}
template <typename I>
-int Namespace<I>::list(IoCtx& io_ctx, vector<string> *names)
+int Namespace<I>::list(IoCtx& io_ctx, std::vector<std::string> *names)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 5) << dendl;
return r;
}
- string group_header_oid = util::group_header_name(snap_namespace.group_id);
+ std::string group_header_oid = util::group_header_name(snap_namespace.group_id);
r = cls_client::group_snap_get_by_id(&group_ioctx,
group_header_oid,
snap_namespace.group_snapshot_id,
}
template <typename I>
-int Snapshot<I>::list(I *ictx, vector<snap_info_t>& snaps) {
+int Snapshot<I>::list(I *ictx, std::vector<snap_info_t>& snaps) {
ldout(ictx->cct, 20) << "snap_list " << ictx << dendl;
int r = ictx->state->refresh_if_required();
uint32_t max_read = 1024;
std::string last_read;
do {
- std::map<string, cls::rbd::TrashImageSpec> trash_entries;
+ std::map<std::string, cls::rbd::TrashImageSpec> trash_entries;
int r = cls_client::trash_list(&io_ctx, last_read, max_read,
&trash_entries);
if (r < 0 && r != -ENOENT) {
}
template <typename I>
-int Trash<I>::list(IoCtx &io_ctx, vector<trash_image_info_t> &entries,
+int Trash<I>::list(IoCtx &io_ctx, std::vector<trash_image_info_t> &entries,
bool exclude_user_remove_source) {
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << __func__ << " " << &io_ctx << dendl;
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::ObjectCacherWriteback: "
+using namespace std;
+
namespace librbd {
namespace cache {
#define dout_prefix *_dout << "librbd::cache::ParentCacheObjectDispatch: " \
<< this << " " << __func__ << ": "
+using namespace std;
using namespace ceph::immutable_obj_cache;
using librbd::util::data_object_name;
namespace cache {
namespace pwl {
+using namespace std;
using namespace librbd::cache::pwl;
typedef AbstractWriteLog<ImageCtx>::Extent Extent;
namespace cache {
namespace pwl {
+using namespace std;
+
namespace {
bool get_json_format(const std::string& s, JSONFormattable *f) {
JSONParser p;
}
std::ostream &WriteLogOperation::format(std::ostream &os) const {
- string op_name = is_writesame ? "(Write Same) " : "(Write) ";
+ std::string op_name = is_writesame ? "(Write Same) " : "(Write) ";
os << op_name;
GenericWriteLogOperation::format(os);
os << ", ";
#define dout_prefix *_dout << "librbd::cache::pwl::Request: " << this << " " \
<< __func__ << ": "
+using namespace std;
+
namespace librbd {
namespace cache {
namespace pwl {
bufferlist cmp_bl;
bufferlist read_bl;
bool is_comp_and_write = false;
- unique_ptr<WriteLogOperationSet> op_set = nullptr;
+ std::unique_ptr<WriteLogOperationSet> op_set = nullptr;
C_WriteRequest(T &pwl, const utime_t arrived, io::Extents &&image_extents,
bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
f->dump_unsigned("entry_index", entry_index);
}
-void WriteLogCacheEntry::generate_test_instances(list<WriteLogCacheEntry*>& ls) {
+void WriteLogCacheEntry::generate_test_instances(std::list<WriteLogCacheEntry*>& ls) {
ls.push_back(new WriteLogCacheEntry);
ls.push_back(new WriteLogCacheEntry);
ls.back()->sync_gen_number = 1;
f->dump_unsigned("first_free_entry", first_free_entry);
f->dump_unsigned("first_valid_entry", first_valid_entry); }
-void WriteLogPoolRoot::generate_test_instances(list<WriteLogPoolRoot*>& ls) {
+void WriteLogPoolRoot::generate_test_instances(std::list<WriteLogPoolRoot*>& ls) {
ls.push_back(new WriteLogPoolRoot);
ls.push_back(new WriteLogPoolRoot);
ls.back()->layout_version = 2;
}
#endif
void dump(ceph::Formatter *f) const;
- static void generate_test_instances(list<WriteLogCacheEntry*>& ls);
+ static void generate_test_instances(std::list<WriteLogCacheEntry*>& ls);
};
struct WriteLogPoolRoot {
#endif
void dump(ceph::Formatter *f) const;
- static void generate_test_instances(list<WriteLogPoolRoot*>& ls);
+ static void generate_test_instances(std::list<WriteLogPoolRoot*>& ls);
};
struct WriteBufferAllocation {
namespace librbd {
namespace cache {
namespace pwl {
+using namespace std;
using namespace librbd::cache::pwl;
namespace rwl {
f->dump_object("super", root);
}
- static void generate_test_instances(list<SuperBlock*>& ls) {
+ static void generate_test_instances(std::list<SuperBlock*>& ls) {
ls.push_back(new SuperBlock);
ls.push_back(new SuperBlock);
ls.back()->root.first_valid_entry = 2;
namespace pwl {
namespace ssd {
+using namespace std;
using namespace librbd::cache::pwl;
// SSD: this number can be updated later
// Make a bl for this hit extent. This will add references to the
// write_entry->cache_bl
ldout(m_image_ctx.cct, 5) << dendl;
- auto write_entry = static_pointer_cast<WriteLogEntry>(map_entry.log_entry);
+ auto write_entry = std::static_pointer_cast<WriteLogEntry>(map_entry.log_entry);
buffer::list hit_bl;
hit_bl = write_entry->get_cache_bl();
bool writesame = write_entry->is_writesame_entry();
ldout(m_cct, 15) << dendl;
ceph_assert(m_data_pool.empty() || m_data_pool_id != -1);
- ostringstream oss;
+ std::ostringstream oss;
oss << RBD_DATA_PREFIX;
if (m_data_pool_id != -1) {
oss << stringify(m_io_ctx.get_id()) << ".";
#define rbd_howmany(x, y) (((x) + (y) - 1) / (y))
+using std::istringstream;
using std::map;
using std::pair;
using std::set;
}
}
- uint64_t len = min(period, src_size - offset);
+ uint64_t len = std::min(period, src_size - offset);
bufferlist *bl = new bufferlist();
auto ctx = new C_CopyRead(&throttle, dest, offset, bl, sparse_size);
auto comp = io::AioCompletion::create_and_start<Context>(
start_time = coarse_mono_clock::now();
while (left > 0) {
uint64_t period_off = off - (off % period);
- uint64_t read_len = min(period_off + period - off, left);
+ uint64_t read_len = std::min(period_off + period - off, left);
bufferlist bl;
int invalidate_cache(ImageCtx *ictx);
int poll_io_events(ImageCtx *ictx, io::AioCompletion **comps, int numcomp);
- int metadata_list(ImageCtx *ictx, const string &last, uint64_t max, map<string, bufferlist> *pairs);
+ int metadata_list(ImageCtx *ictx, const std::string &last, uint64_t max,
+ std::map<std::string, bufferlist> *pairs);
int metadata_get(ImageCtx *ictx, const std::string &key, std::string *value);
int list_watchers(ImageCtx *ictx, std::list<librbd::image_watcher_t> &watchers);
SafeTimer *timer;
ceph::mutex *timer_lock;
ImageCtx::get_timer_instance(cct, &timer, &timer_lock);
- for (auto flag : throttle_flags) {
- m_throttles.push_back(make_pair(
- flag.first,
- new TokenBucketThrottle(cct, flag.second, 0, 0, timer, timer_lock)));
+ for (auto [flag, name] : throttle_flags) {
+ m_throttles.emplace_back(
+ flag,
+ new TokenBucketThrottle(cct, name, 0, 0, timer, timer_lock));
}
}
struct MetadataSetEvent : public OpEventBase {
static const EventType TYPE = EVENT_TYPE_METADATA_SET;
- string key;
- string value;
+ std::string key;
+ std::string value;
MetadataSetEvent() {
}
- MetadataSetEvent(uint64_t op_tid, const string &_key, const string &_value)
+ MetadataSetEvent(uint64_t op_tid, const std::string &_key, const std::string &_value)
: OpEventBase(op_tid), key(_key), value(_value) {
}
struct MetadataRemoveEvent : public OpEventBase {
static const EventType TYPE = EVENT_TYPE_METADATA_REMOVE;
- string key;
+ std::string key;
MetadataRemoveEvent() {
}
- MetadataRemoveEvent(uint64_t op_tid, const string &_key)
+ MetadataRemoveEvent(uint64_t op_tid, const std::string &_key)
: OpEventBase(op_tid), key(_key) {
}
#undef dout_prefix
#define dout_prefix *_dout << "librbd: "
+using std::list;
+using std::map;
+using std::pair;
+using std::set;
using std::string;
using std::vector;
#define dout_prefix *_dout << "librbd::managed_lock::ReleaseRequest: " \
<< this << " " << __func__ << ": "
+using std::string;
+
namespace librbd {
namespace managed_lock {
ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
if (pspec.pool_id != -1) {
- map<uint64_t, SnapInfo>::iterator it;
+ std::map<uint64_t, SnapInfo>::iterator it;
for (it = image_ctx.snap_info.begin();
it != image_ctx.snap_info.end(); ++it) {
// skip our snap id (if checking base image, CEPH_NOSNAP won't match)
ceph_assert(image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner());
- string oid = image_ctx.get_object_name(m_object_no);
+ std::string oid = image_ctx.get_object_name(m_object_no);
ldout(image_ctx.cct, 10) << "removing (with copyup) " << oid << dendl;
auto object_dispatch_spec = io::ObjectDispatchSpec::create_discard(
}
}
- string oid = image_ctx.get_object_name(m_object_no);
+ std::string oid = image_ctx.get_object_name(m_object_no);
ldout(image_ctx.cct, 10) << "removing " << oid << dendl;
librados::AioCompletion *rados_completion =
ContextCompletion *completion =
new ContextCompletion(this->create_async_callback_context(), true);
- for (vector<ObjectExtent>::iterator p = extents.begin();
- p != extents.end(); ++p) {
- ldout(cct, 20) << " ex " << *p << dendl;
+ for (auto& extent : extents) {
+ ldout(cct, 20) << " ex " << extent << dendl;
Context *req_comp = new C_ContextCompletion(*completion);
- if (p->offset == 0) {
+ if (extent.offset == 0) {
// treat as a full object delete on the boundary
- p->length = image_ctx.layout.object_size;
+ extent.length = image_ctx.layout.object_size;
}
auto object_dispatch_spec = io::ObjectDispatchSpec::create_discard(
- &image_ctx, io::OBJECT_DISPATCH_LAYER_NONE, p->objectno, p->offset,
- p->length, io_context, 0, 0, {}, req_comp);
+ &image_ctx, io::OBJECT_DISPATCH_LAYER_NONE, extent.objectno, extent.offset,
+ extent.length, io_context, 0, 0, {}, req_comp);
object_dispatch_spec->send();
}
completion->finish_adding_requests();