fe->run();
- r = store->register_to_service_map("rgw-nfs", service_map_meta);
+ r = store->register_to_service_map(this, "rgw-nfs", service_map_meta);
if (r < 0) {
derr << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl;
/* ignore error */
}
do {
- ret = store->meta_list_keys_next(handle, max, user_ids,
+ ret = store->meta_list_keys_next(dpp(), handle, max, user_ids,
&truncated);
if (ret < 0 && ret != -ENOENT) {
cerr << "ERROR: buckets limit check lists_keys_next(): "
struct rgw_log_entry entry;
// peek at first entry to get bucket metadata
- r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_show_next(h, &entry);
+ r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_show_next(dpp(), h, &entry);
if (r < 0) {
cerr << "error reading log " << oid << ": " << cpp_strerror(-r) << std::endl;
return -r;
formatter->flush(cout);
}
next:
- r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_show_next(h, &entry);
+ r = static_cast<rgw::sal::RadosStore*>(store)->getRados()->log_show_next(dpp(), h, &entry);
} while (r > 0);
if (r < 0) {
do {
list<string> keys;
left = (max_entries_specified ? max_entries - count : max);
- ret = store->meta_list_keys_next(handle, left, keys, &truncated);
+ ret = store->meta_list_keys_next(dpp(), handle, left, keys, &truncated);
if (ret < 0 && ret != -ENOENT) {
cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
return -ret;
}
do {
list<string> keys;
- ret = store->meta_list_keys_next(handle, 1000, keys, &truncated);
+ ret = store->meta_list_keys_next(dpp, handle, 1000, keys, &truncated);
if (ret < 0) {
cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
store->meta_list_keys_complete(handle);
while (ret == 0 && truncated) {
std::list<std::string> buckets;
constexpr int max_keys = 1000;
- ret = store->meta_list_keys_next(handle, max_keys, buckets,
+ ret = store->meta_list_keys_next(dpp, handle, max_keys, buckets,
&truncated);
for (auto& bucket_name : buckets) {
if (show_stats) {
do {
list<std::string> keys;
- ret = store->meta_list_keys_next(handle, default_max_keys, keys, &truncated);
+ ret = store->meta_list_keys_next(dpp, handle, default_max_keys, keys, &truncated);
if (ret < 0 && ret != -ENOENT) {
cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
return ret;
});
do {
list<std::string> keys;
- ret = store->meta_list_keys_next(handle, default_max_keys, keys, &truncated);
+ ret = store->meta_list_keys_next(dpp, handle, default_max_keys, keys, &truncated);
if (ret < 0 && ret != -ENOENT) {
std::cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl;
return ret;
return -EIO;
}
- op_ret = state->bucket->check_quota(user_quota, bucket_quota, real_ofs, null_yield, true);
+ op_ret = state->bucket->check_quota(this, user_quota, bucket_quota, real_ofs, null_yield, true);
/* max_size exceed */
if (op_ret < 0)
return -EIO;
goto done;
}
- op_ret = state->bucket->check_quota(user_quota, bucket_quota, state->obj_size, null_yield, true);
+ op_ret = state->bucket->check_quota(this, user_quota, bucket_quota, state->obj_size, null_yield, true);
/* max_size exceed */
if (op_ret < 0) {
goto done;
vector<rgw::sal::Lifecycle::LCEntry> entries;
string marker;
- dout(5) << "RGWLC::bucket_lc_prepare(): PREPARE "
+ ldpp_dout(this, 5) << "RGWLC::bucket_lc_prepare(): PREPARE "
<< "index: " << index << " worker ix: " << worker->ix
<< dendl;
return 0;
}
-static bool obj_has_expired(CephContext *cct, ceph::real_time mtime, int days,
+static bool obj_has_expired(const DoutPrefixProvider *dpp, CephContext *cct, ceph::real_time mtime, int days,
ceph::real_time *expire_time = nullptr)
{
double timediff, cmp;
*expire_time = mtime + make_timespan(cmp);
}
- ldout(cct, 20) << __func__ << __func__
+ ldpp_dout(dpp, 20) << __func__ << __func__
<< "(): mtime=" << mtime << " days=" << days
<< " base_time=" << base_time << " timediff=" << timediff
<< " cmp=" << cmp
list_params.marker = pre_obj.key;
int ret = fetch(dpp);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: list_op returned ret=" << ret
+ ldpp_dout(dpp, 0) << "ERROR: list_op returned ret=" << ret
<< dendl;
return ret;
}
auto wt = boost::get<std::tuple<lc_op, rgw_bucket_dir_entry>>(wi);
auto& [rule, obj] = wt;
RGWMPObj mp_obj;
- if (obj_has_expired(cct, obj.meta.mtime, rule.mp_expiration)) {
+ if (obj_has_expired(this, cct, obj.meta.mtime, rule.mp_expiration)) {
rgw_obj_key key(obj.key);
if (!mp_obj.from_meta(key.name)) {
return;
++prefix_iter) {
if (worker_should_stop(stop_at, once)) {
- ldout(cct, 5) << __func__ << " interval budget EXPIRED worker "
+ ldpp_dout(this, 5) << __func__ << " interval budget EXPIRED worker "
<< worker->ix
<< dendl;
return 0;
int ret = read_obj_tags(dpp, oc.obj.get(), oc.rctx, tags_bl);
if (ret < 0) {
if (ret != -ENODATA) {
- ldout(oc.cct, 5) << "ERROR: read_obj_tags returned r="
+ ldpp_dout(oc.dpp, 5) << "ERROR: read_obj_tags returned r="
<< ret << " " << oc.wq->thr_name() << dendl;
}
return 0;
auto iter = tags_bl.cbegin();
dest_obj_tags.decode(iter);
} catch (buffer::error& err) {
- ldout(oc.cct,0) << "ERROR: caught buffer::error, couldn't decode TagSet "
+ ldpp_dout(oc.dpp,0) << "ERROR: caught buffer::error, couldn't decode TagSet "
<< oc.wq->thr_name() << dendl;
return -EIO;
}
if (! has_all_tags(op, dest_obj_tags)) {
- ldout(oc.cct, 20) << __func__ << "() skipping obj " << oc.obj
+ ldpp_dout(oc.dpp, 20) << __func__ << "() skipping obj " << oc.obj
<< " as tags do not match in rule: "
<< op.id << " "
<< oc.wq->thr_name() << dendl;
if (ret == -ENOENT) {
return false;
}
- ldout(oc.cct, 0) << "ERROR: check_tags on obj=" << oc.obj
+ ldpp_dout(oc.dpp, 0) << "ERROR: check_tags on obj=" << oc.obj
<< " returned ret=" << ret << " "
<< oc.wq->thr_name() << dendl;
return false;
ceph::real_clock::to_time_t(*op.expiration_date);
*exp_time = *op.expiration_date;
} else {
- is_expired = obj_has_expired(oc.cct, mtime, op.expiration, exp_time);
+ is_expired = obj_has_expired(dpp, oc.cct, mtime, op.expiration, exp_time);
}
ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key << ": is_expired="
if (o.is_delete_marker()) {
r = remove_expired_obj(oc.dpp, oc, true);
if (r < 0) {
- ldout(oc.cct, 0) << "ERROR: current is-dm remove_expired_obj "
+ ldpp_dout(oc.dpp, 0) << "ERROR: current is-dm remove_expired_obj "
<< oc.bucket << ":" << o.key
<< " " << cpp_strerror(r) << " "
<< oc.wq->thr_name() << dendl;
return r;
}
- ldout(oc.cct, 2) << "DELETED: current is-dm "
+ ldpp_dout(oc.dpp, 2) << "DELETED: current is-dm "
<< oc.bucket << ":" << o.key
<< " " << oc.wq->thr_name() << dendl;
} else {
/* ! o.is_delete_marker() */
r = remove_expired_obj(oc.dpp, oc, !oc.bucket->versioned());
if (r < 0) {
- ldout(oc.cct, 0) << "ERROR: remove_expired_obj "
+ ldpp_dout(oc.dpp, 0) << "ERROR: remove_expired_obj "
<< oc.bucket << ":" << o.key
<< " " << cpp_strerror(r) << " "
<< oc.wq->thr_name() << dendl;
if (perfcounter) {
perfcounter->inc(l_rgw_lc_expire_current, 1);
}
- ldout(oc.cct, 2) << "DELETED:" << oc.bucket << ":" << o.key
+ ldpp_dout(oc.dpp, 2) << "DELETED:" << oc.bucket << ":" << o.key
<< " " << oc.wq->thr_name() << dendl;
}
return 0;
}
int expiration = oc.op.noncur_expiration;
- bool is_expired = obj_has_expired(oc.cct, oc.effective_mtime, expiration,
+ bool is_expired = obj_has_expired(dpp, oc.cct, oc.effective_mtime, expiration,
exp_time);
ldpp_dout(dpp, 20) << __func__ << "(): key=" << o.key << ": is_expired="
auto& o = oc.o;
int r = remove_expired_obj(oc.dpp, oc, true);
if (r < 0) {
- ldout(oc.cct, 0) << "ERROR: remove_expired_obj (non-current expiration) "
+ ldpp_dout(oc.dpp, 0) << "ERROR: remove_expired_obj (non-current expiration) "
<< oc.bucket << ":" << o.key
<< " " << cpp_strerror(r)
<< " " << oc.wq->thr_name() << dendl;
if (perfcounter) {
perfcounter->inc(l_rgw_lc_expire_noncurrent, 1);
}
- ldout(oc.cct, 2) << "DELETED:" << oc.bucket << ":" << o.key
+ ldpp_dout(oc.dpp, 2) << "DELETED:" << oc.bucket << ":" << o.key
<< " (non-current expiration) "
<< oc.wq->thr_name() << dendl;
return 0;
auto& o = oc.o;
int r = remove_expired_obj(oc.dpp, oc, true);
if (r < 0) {
- ldout(oc.cct, 0) << "ERROR: remove_expired_obj (delete marker expiration) "
+ ldpp_dout(oc.dpp, 0) << "ERROR: remove_expired_obj (delete marker expiration) "
<< oc.bucket << ":" << o.key
<< " " << cpp_strerror(r)
<< " " << oc.wq->thr_name()
if (perfcounter) {
perfcounter->inc(l_rgw_lc_expire_dm, 1);
}
- ldout(oc.cct, 2) << "DELETED:" << oc.bucket << ":" << o.key
+ ldpp_dout(oc.dpp, 2) << "DELETED:" << oc.bucket << ":" << o.key
<< " (delete marker expiration) "
<< oc.wq->thr_name() << dendl;
return 0;
ceph::real_clock::to_time_t(*transition.date);
*exp_time = *transition.date;
} else {
- is_expired = obj_has_expired(oc.cct, mtime, transition.days, exp_time);
+ is_expired = obj_has_expired(dpp, oc.cct, mtime, transition.days, exp_time);
}
- ldout(oc.cct, 20) << __func__ << "(): key=" << o.key << ": is_expired="
+ ldpp_dout(oc.dpp, 20) << __func__ << "(): key=" << o.key << ": is_expired="
<< is_expired << " "
<< oc.wq->thr_name() << dendl;
++prefix_iter) {
if (worker_should_stop(stop_at, once)) {
- ldout(cct, 5) << __func__ << " interval budget EXPIRED worker "
+ ldpp_dout(this, 5) << __func__ << " interval budget EXPIRED worker "
<< worker->ix
<< dendl;
return 0;
obj_names[index],
cookie);
- dout(5) << "RGWLC::bucket_lc_post(): POST " << entry
+ ldpp_dout(this, 5) << "RGWLC::bucket_lc_post(): POST " << entry
<< " index: " << index << " worker ix: " << worker->ix
<< dendl;
auto now = time(nullptr);
- dout(16) << "RGWLC::expired_session"
+ ldpp_dout(this, 16) << "RGWLC::expired_session"
<< " started: " << started
<< " interval: " << interval << "(*2==" << 2*interval << ")"
<< " now: " << now
int RGWLC::process(int index, int max_lock_secs, LCWorker* worker,
bool once = false)
{
- dout(5) << "RGWLC::process(): ENTER: "
+ ldpp_dout(this, 5) << "RGWLC::process(): ENTER: "
<< "index: " << index << " worker ix: " << worker->ix
<< dendl;
if (ret >= 0) {
if (entry.status == lc_processing) {
if (expired_session(entry.start_time)) {
- dout(5) << "RGWLC::process(): STALE lc session found for: " << entry
+ ldpp_dout(this, 5) << "RGWLC::process(): STALE lc session found for: " << entry
<< " index: " << index << " worker ix: " << worker->ix
<< " (clearing)"
<< dendl;
} else {
- dout(5) << "RGWLC::process(): ACTIVE entry: " << entry
+ ldpp_dout(this, 5) << "RGWLC::process(): ACTIVE entry: " << entry
<< " index: " << index << " worker ix: " << worker->ix
<< dendl;
goto exit;
RGWObjTags::tag_map_t obj_tag_map = obj_tagset.get_tags();
if (cct->_conf->subsys.should_gather(ceph_subsys_rgw, 16)) {
for (const auto& elt : obj_tag_map) {
- ldout(cct, 16) << __func__
+ ldpp_dout(dpp, 16) << __func__
<< "() key=" << elt.first << " val=" << elt.second
<< dendl;
}
fes.push_back(fe);
}
- r = store->register_to_service_map("rgw", service_map_meta);
+ r = store->register_to_service_map(&dp, "rgw", service_map_meta);
if (r < 0) {
derr << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl;
return 0;
}
- int list_keys_next(void *handle, int max, list<string>& keys, bool *truncated) override {
+ int list_keys_next(const DoutPrefixProvider *dpp, void *handle, int max, list<string>& keys, bool *truncated) override {
iter_data *data = static_cast<iter_data *>(handle);
for (int i = 0; i < max && data->iter != data->sections.end(); ++i, ++(data->iter)) {
keys.push_back(*data->iter);
return 0;
}
-int RGWMetadataHandler_GenericMetaBE::list_keys_next(void *handle, int max, list<string>& keys, bool *truncated)
+int RGWMetadataHandler_GenericMetaBE::list_keys_next(const DoutPrefixProvider *dpp, void *handle, int max, list<string>& keys, bool *truncated)
{
auto op = static_cast<RGWSI_MetaBackend_Handler::Op_ManagedCtx *>(handle);
- int ret = op->list_next(max, &keys, truncated);
+ int ret = op->list_next(dpp, max, &keys, truncated);
if (ret < 0 && ret != -ENOENT) {
return ret;
}
return 0;
}
-int RGWMetadataManager::list_keys_next(void *handle, int max, list<string>& keys, bool *truncated)
+int RGWMetadataManager::list_keys_next(const DoutPrefixProvider *dpp, void *handle, int max, list<string>& keys, bool *truncated)
{
list_keys_handle *h = static_cast<list_keys_handle *>(handle);
RGWMetadataHandler *handler = h->handler;
- return handler->list_keys_next(h->handle, max, keys, truncated);
+ return handler->list_keys_next(dpp, h->handle, max, keys, truncated);
}
void RGWMetadataManager::list_keys_complete(void *handle)
std::function<int()> f) = 0;
virtual int list_keys_init(const DoutPrefixProvider *dpp, const string& marker, void **phandle) = 0;
- virtual int list_keys_next(void *handle, int max, list<string>& keys, bool *truncated) = 0;
+ virtual int list_keys_next(const DoutPrefixProvider *dpp, void *handle, int max, list<string>& keys, bool *truncated) = 0;
virtual void list_keys_complete(void *handle) = 0;
virtual string get_marker(void *handle) = 0;
int get_shard_id(const string& entry, int *shard_id) override;
int list_keys_init(const DoutPrefixProvider *dpp, const std::string& marker, void **phandle) override;
- int list_keys_next(void *handle, int max, std::list<string>& keys, bool *truncated) override;
+ int list_keys_next(const DoutPrefixProvider *dpp, void *handle, int max, std::list<string>& keys, bool *truncated) override;
void list_keys_complete(void *handle) override;
std::string get_marker(void *handle) override;
int list_keys_init(const DoutPrefixProvider *dpp, const string& section, void **phandle);
int list_keys_init(const DoutPrefixProvider *dpp, const string& section, const string& marker, void **phandle);
- int list_keys_next(void *handle, int max, list<string>& keys, bool *truncated);
+ int list_keys_next(const DoutPrefixProvider *dpp, void *handle, int max, list<string>& keys, bool *truncated);
void list_keys_complete(void *handle);
string get_marker(void *handle);
*shard = objexp_hint_get_shardname(shard_num);
}
-static int objexp_hint_parse(CephContext *cct, cls_timeindex_entry &ti_entry,
+static int objexp_hint_parse(const DoutPrefixProvider *dpp, CephContext *cct, cls_timeindex_entry &ti_entry,
objexp_hint_entry *hint_entry)
{
try {
auto iter = ti_entry.value.cbegin();
decode(*hint_entry, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: couldn't decode avail_pools" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: couldn't decode avail_pools" << dendl;
}
return 0;
ldpp_dout(dpp, 15) << "got removal hint for: " << iter->key_ts.sec() \
<< " - " << iter->key_ext << dendl;
- int ret = objexp_hint_parse(store->ctx(), *iter, &hint);
+ int ret = objexp_hint_parse(dpp, store->ctx(), *iter, &hint);
if (ret < 0) {
ldpp_dout(dpp, 1) << "cannot parse removal hint for " << hint.obj_key << dendl;
continue;
utime_t last_run;
do {
utime_t start = ceph_clock_now();
- ldout(cct, 2) << "object expiration: start" << dendl;
+ ldpp_dout(this, 2) << "object expiration: start" << dendl;
if (oe->inspect_all_shards(this, last_run, start)) {
/* All shards have been processed properly. Next time we can start
* from this moment. */
last_run = start;
}
- ldout(cct, 2) << "object expiration: stop" << dendl;
+ ldpp_dout(this, 2) << "object expiration: stop" << dendl;
if (oe->going_down())
static string mp_ns = RGW_OBJ_NS_MULTIPART;
static string shadow_ns = RGW_OBJ_NS_SHADOW;
-static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name);
+static void forward_req_info(const DoutPrefixProvider *dpp, CephContext *cct, req_info& info, const std::string& bucket_name);
static MultipartMetaFilter mp_filter;
return 0;
}
-static bool validate_cors_rule_method(RGWCORSRule *rule, const char *req_meth) {
+static bool validate_cors_rule_method(const DoutPrefixProvider *dpp, RGWCORSRule *rule, const char *req_meth) {
uint8_t flags = 0;
if (!req_meth) {
- dout(5) << "req_meth is null" << dendl;
+ ldpp_dout(dpp, 5) << "req_meth is null" << dendl;
return false;
}
else if (strcmp(req_meth, "HEAD") == 0) flags = RGW_CORS_HEAD;
if (rule->get_allowed_methods() & flags) {
- dout(10) << "Method " << req_meth << " is supported" << dendl;
+ ldpp_dout(dpp, 10) << "Method " << req_meth << " is supported" << dendl;
} else {
- dout(5) << "Method " << req_meth << " is not supported" << dendl;
+ ldpp_dout(dpp, 5) << "Method " << req_meth << " is not supported" << dendl;
return false;
}
return true;
}
-static bool validate_cors_rule_header(RGWCORSRule *rule, const char *req_hdrs) {
+static bool validate_cors_rule_header(const DoutPrefixProvider *dpp, RGWCORSRule *rule, const char *req_hdrs) {
if (req_hdrs) {
vector<string> hdrs;
get_str_vec(req_hdrs, hdrs);
for (const auto& hdr : hdrs) {
if (!rule->is_header_allowed(hdr.c_str(), hdr.length())) {
- dout(5) << "Header " << hdr << " is not registered in this rule" << dendl;
+ ldpp_dout(dpp, 5) << "Header " << hdr << " is not registered in this rule" << dendl;
return false;
}
}
* any of the values in list of headers do not set any additional headers and
* terminate this set of steps.
* */
-static void get_cors_response_headers(RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
+static void get_cors_response_headers(const DoutPrefixProvider *dpp, RGWCORSRule *rule, const char *req_hdrs, string& hdrs, string& exp_hdrs, unsigned *max_age) {
if (req_hdrs) {
list<string> hl;
get_str_list(req_hdrs, hl);
for(list<string>::iterator it = hl.begin(); it != hl.end(); ++it) {
if (!rule->is_header_allowed((*it).c_str(), (*it).length())) {
- dout(5) << "Header " << (*it) << " is not registered in this rule" << dendl;
+ ldpp_dout(dpp, 5) << "Header " << (*it) << " is not registered in this rule" << dendl;
} else {
if (hdrs.length() > 0) hdrs.append(",");
hdrs.append((*it));
if (req_meth) {
method = req_meth;
/* CORS 6.2.5. */
- if (!validate_cors_rule_method(rule, req_meth)) {
+ if (!validate_cors_rule_method(this, rule, req_meth)) {
return false;
}
}
const char *req_hdrs = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_HEADERS");
/* CORS 6.2.6. */
- get_cors_response_headers(rule, req_hdrs, headers, exp_headers, max_age);
+ get_cors_response_headers(this, rule, req_hdrs, headers, exp_headers, max_age);
return true;
}
string etag;
};
-static int iterate_slo_parts(CephContext *cct,
+static int iterate_slo_parts(const DoutPrefixProvider *dpp,
+ CephContext *cct,
rgw::sal::Store*store,
off_t ofs,
off_t end,
if (found_start) {
if (cb) {
- dout(20) << "iterate_slo_parts()"
+ ldpp_dout(dpp, 20) << "iterate_slo_parts()"
<< " obj=" << part.obj_name
<< " start_ofs=" << start_ofs
<< " end_ofs=" << end_ofs
<< " total=" << total_len
<< dendl;
- r = iterate_slo_parts(s->cct, store, ofs, end, slo_parts,
+ r = iterate_slo_parts(this, s->cct, store, ofs, end, slo_parts,
get_obj_user_manifest_iterate_cb, (void *)this);
if (r < 0) {
return r;
if (!chunked_upload) { /* with chunked upload we don't know how big is the upload.
we also check sizes at the end anyway */
- op_ret = s->bucket->check_quota(user_quota, bucket_quota, s->content_length, y);
+ op_ret = s->bucket->check_quota(this, user_quota, bucket_quota, s->content_length, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "check_quota() returned ret=" << op_ret << dendl;
return;
return;
}
- op_ret = s->bucket->check_quota(user_quota, bucket_quota, s->obj_size, y);
+ op_ret = s->bucket->check_quota(this, user_quota, bucket_quota, s->obj_size, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
return;
ceph::buffer::list bl, aclbl;
int len = 0;
- op_ret = s->bucket->check_quota(user_quota, bucket_quota, s->content_length, y);
+ op_ret = s->bucket->check_quota(this, user_quota, bucket_quota, s->content_length, y);
if (op_ret < 0) {
return;
}
s->object->set_obj_size(ofs);
- op_ret = s->bucket->check_quota(user_quota, bucket_quota, s->obj_size, y);
+ op_ret = s->bucket->check_quota(this, user_quota, bucket_quota, s->obj_size, y);
if (op_ret < 0) {
return;
}
if (!s->system_request) { // no quota enforcement for system requests
// enforce quota against the destination bucket owner
- op_ret = dest_bucket->check_quota(user_quota, bucket_quota,
+ op_ret = dest_bucket->check_quota(this, user_quota, bucket_quota,
astate->accounted_size, y);
if (op_ret < 0) {
return;
}
void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigned *max_age) {
- get_cors_response_headers(rule, req_hdrs, hdrs, exp_hdrs, max_age);
+ get_cors_response_headers(this, rule, req_hdrs, hdrs, exp_hdrs, max_age);
}
int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
return -ENOENT;
}
- if (!validate_cors_rule_method(rule, req_meth)) {
+ if (!validate_cors_rule_method(this, rule, req_meth)) {
return -ENOENT;
}
- if (!validate_cors_rule_header(rule, req_hdrs)) {
+ if (!validate_cors_rule_header(this, rule, req_hdrs)) {
return -ENOENT;
}
return 0;
}
-static void forward_req_info(CephContext *cct, req_info& info, const std::string& bucket_name)
+static void forward_req_info(const DoutPrefixProvider *dpp, CephContext *cct, req_info& info, const std::string& bucket_name)
{
/* the request of container or object level will contain bucket name.
* only at account level need to append the bucket name */
return;
}
- ldout(cct, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl;
+ ldpp_dout(dpp, 20) << "append the bucket: "<< bucket_name << " to req_info" << dendl;
info.script_uri.append("/").append(bucket_name);
info.request_uri_aws4 = info.request_uri = info.script_uri;
info.effective_uri = "/" + bucket_name;
new_bucket.name = bucket_name;
rgw_placement_rule placement_rule;
placement_rule.storage_class = s->info.storage_class;
- forward_req_info(s->cct, info, bucket_name);
+ forward_req_info(this, s->cct, info, bucket_name);
op_ret = store->create_bucket(this, s->user.get(), new_bucket,
store->get_zone()->get_zonegroup().get_id(),
return op_ret;
}
- op_ret = bucket->check_quota(user_quota, bucket_quota, size, y);
+ op_ret = bucket->check_quota(this, user_quota, bucket_quota, size, y);
if (op_ret < 0) {
return op_ret;
}
return op_ret;
}
- op_ret = bucket->check_quota(user_quota, bucket_quota, size, y);
+ op_ret = bucket->check_quota(this, user_quota, bucket_quota, size, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "quota exceeded for path=" << path << dendl;
return op_ret;
do {
list<string> keys;
- ret = store->meta_list_keys_next(handle, max, keys, &truncated);
+ ret = store->meta_list_keys_next(dpp, handle, max, keys, &truncated);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << dendl;
return ret;
{
rgw::sal::Object::StatOp* front_op = ops.front().get();
- int ret = front_op->wait();
+ int ret = front_op->wait(dpp);
if (ret < 0) {
if (ret != -ENOENT) {
ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " << cpp_strerror(-ret) << dendl;
std::set<std::string> obj_oids;
std::unique_ptr<rgw::sal::Object::StatOp> front_op = std::move(ops.front());
- int ret = front_op->wait();
+ int ret = front_op->wait(dpp);
if (ret < 0) {
if (ret != -ENOENT) {
ldpp_dout(dpp, -1) << "ERROR: stat_async() returned error: " <<
do {
std::list<std::string> buckets;
- ret = store->meta_list_keys_next(handle, max_keys, buckets, &truncated);
+ ret = store->meta_list_keys_next(dpp, handle, max_keys, buckets, &truncated);
for (std::string& bucket_id : buckets) {
ret = run(dpp, bucket_id);
do {
list<string> keys;
- ret = store->meta_list_keys_next(handle, max, keys, &truncated);
+ ret = store->meta_list_keys_next(dpp, handle, max, keys, &truncated);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: lists_keys_next(): ret=" << ret << dendl;
goto done;
public:
virtual ~RGWQuotaInfoApplier() {}
- virtual bool is_size_exceeded(const char * const entity,
+ virtual bool is_size_exceeded(const DoutPrefixProvider *dpp,
+ const char * const entity,
const RGWQuotaInfo& qinfo,
const RGWStorageStats& stats,
const uint64_t size) const = 0;
- virtual bool is_num_objs_exceeded(const char * const entity,
+ virtual bool is_num_objs_exceeded(const DoutPrefixProvider *dpp,
+ const char * const entity,
const RGWQuotaInfo& qinfo,
const RGWStorageStats& stats,
const uint64_t num_objs) const = 0;
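+  // note: dpp lets each applier emit its "quota exceeded" diagnostics via
+  // ldpp_dout(), so the log lines carry the caller's request prefix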
class RGWQuotaInfoDefApplier : public RGWQuotaInfoApplier {
public:
- bool is_size_exceeded(const char * const entity,
+ bool is_size_exceeded(const DoutPrefixProvider *dpp, const char * const entity,
const RGWQuotaInfo& qinfo,
const RGWStorageStats& stats,
const uint64_t size) const override;
- bool is_num_objs_exceeded(const char * const entity,
+ bool is_num_objs_exceeded(const DoutPrefixProvider *dpp, const char * const entity,
const RGWQuotaInfo& qinfo,
const RGWStorageStats& stats,
const uint64_t num_objs) const override;
class RGWQuotaInfoRawApplier : public RGWQuotaInfoApplier {
public:
- bool is_size_exceeded(const char * const entity,
+ bool is_size_exceeded(const DoutPrefixProvider *dpp, const char * const entity,
const RGWQuotaInfo& qinfo,
const RGWStorageStats& stats,
const uint64_t size) const override;
- bool is_num_objs_exceeded(const char * const entity,
+ bool is_num_objs_exceeded(const DoutPrefixProvider *dpp, const char * const entity,
const RGWQuotaInfo& qinfo,
const RGWStorageStats& stats,
const uint64_t num_objs) const override;
};
-bool RGWQuotaInfoDefApplier::is_size_exceeded(const char * const entity,
+bool RGWQuotaInfoDefApplier::is_size_exceeded(const DoutPrefixProvider *dpp,
+ const char * const entity,
const RGWQuotaInfo& qinfo,
const RGWStorageStats& stats,
const uint64_t size) const
const uint64_t new_size = rgw_rounded_objsize(size);
if (cur_size + new_size > static_cast<uint64_t>(qinfo.max_size)) {
- dout(10) << "quota exceeded: stats.size_rounded=" << stats.size_rounded
+ ldpp_dout(dpp, 10) << "quota exceeded: stats.size_rounded=" << stats.size_rounded
<< " size=" << new_size << " "
<< entity << "_quota.max_size=" << qinfo.max_size << dendl;
return true;
return false;
}
-bool RGWQuotaInfoDefApplier::is_num_objs_exceeded(const char * const entity,
+bool RGWQuotaInfoDefApplier::is_num_objs_exceeded(const DoutPrefixProvider *dpp,
+ const char * const entity,
const RGWQuotaInfo& qinfo,
const RGWStorageStats& stats,
const uint64_t num_objs) const
}
if (stats.num_objects + num_objs > static_cast<uint64_t>(qinfo.max_objects)) {
- dout(10) << "quota exceeded: stats.num_objects=" << stats.num_objects
+ ldpp_dout(dpp, 10) << "quota exceeded: stats.num_objects=" << stats.num_objects
<< " " << entity << "_quota.max_objects=" << qinfo.max_objects
<< dendl;
return true;
return false;
}
-bool RGWQuotaInfoRawApplier::is_size_exceeded(const char * const entity,
+bool RGWQuotaInfoRawApplier::is_size_exceeded(const DoutPrefixProvider *dpp,
+ const char * const entity,
const RGWQuotaInfo& qinfo,
const RGWStorageStats& stats,
const uint64_t size) const
const uint64_t cur_size = stats.size;
if (cur_size + size > static_cast<uint64_t>(qinfo.max_size)) {
- dout(10) << "quota exceeded: stats.size=" << stats.size
+ ldpp_dout(dpp, 10) << "quota exceeded: stats.size=" << stats.size
<< " size=" << size << " "
<< entity << "_quota.max_size=" << qinfo.max_size << dendl;
return true;
return false;
}
-bool RGWQuotaInfoRawApplier::is_num_objs_exceeded(const char * const entity,
+bool RGWQuotaInfoRawApplier::is_num_objs_exceeded(const DoutPrefixProvider *dpp,
+ const char * const entity,
const RGWQuotaInfo& qinfo,
const RGWStorageStats& stats,
const uint64_t num_objs) const
}
if (stats.num_objects + num_objs > static_cast<uint64_t>(qinfo.max_objects)) {
- dout(10) << "quota exceeded: stats.num_objects=" << stats.num_objects
+ ldpp_dout(dpp, 10) << "quota exceeded: stats.num_objects=" << stats.num_objects
<< " " << entity << "_quota.max_objects=" << qinfo.max_objects
<< dendl;
return true;
RGWBucketStatsCache bucket_stats_cache;
RGWUserStatsCache user_stats_cache;
- int check_quota(const char * const entity,
+ int check_quota(const DoutPrefixProvider *dpp,
+ const char * const entity,
const RGWQuotaInfo& quota,
const RGWStorageStats& stats,
const uint64_t num_objs,
const auto& quota_applier = RGWQuotaInfoApplier::get_instance(quota);
- ldout(store->ctx(), 20) << entity
+ ldpp_dout(dpp, 20) << entity
<< " quota: max_objects=" << quota.max_objects
<< " max_size=" << quota.max_size << dendl;
- if (quota_applier.is_num_objs_exceeded(entity, quota, stats, num_objs)) {
+ if (quota_applier.is_num_objs_exceeded(dpp, entity, quota, stats, num_objs)) {
return -ERR_QUOTA_EXCEEDED;
}
- if (quota_applier.is_size_exceeded(entity, quota, stats, size)) {
+ if (quota_applier.is_size_exceeded(dpp, entity, quota, stats, size)) {
return -ERR_QUOTA_EXCEEDED;
}
- ldout(store->ctx(), 20) << entity << " quota OK:"
+ ldpp_dout(dpp, 20) << entity << " quota OK:"
<< " stats.num_objects=" << stats.num_objects
<< " stats.size=" << stats.size << dendl;
return 0;
bucket_stats_cache(_store),
user_stats_cache(dpp, _store, quota_threads) {}
- int check_quota(const rgw_user& user,
+ int check_quota(const DoutPrefixProvider *dpp,
+ const rgw_user& user,
rgw_bucket& bucket,
RGWQuotaInfo& user_quota,
RGWQuotaInfo& bucket_quota,
if (ret < 0) {
return ret;
}
- ret = check_quota("bucket", bucket_quota, bucket_stats, num_objs, size);
+ ret = check_quota(dpp, "bucket", bucket_quota, bucket_stats, num_objs, size);
if (ret < 0) {
return ret;
}
if (ret < 0) {
return ret;
}
- ret = check_quota("user", user_quota, user_stats, num_objs, size);
+ ret = check_quota(dpp, "user", user_quota, user_stats, num_objs, size);
if (ret < 0) {
return ret;
}
user_stats_cache.adjust_stats(user, bucket, obj_delta, added_bytes, removed_bytes);
}
- void check_bucket_shards(uint64_t max_objs_per_shard, uint64_t num_shards,
+ void check_bucket_shards(const DoutPrefixProvider *dpp, uint64_t max_objs_per_shard, uint64_t num_shards,
uint64_t num_objs, bool& need_resharding, uint32_t *suggested_num_shards) override
{
if (num_objs > num_shards * max_objs_per_shard) {
- ldout(store->ctx(), 0) << __func__ << ": resharding needed: stats.num_objects=" << num_objs
+ ldpp_dout(dpp, 0) << __func__ << ": resharding needed: stats.num_objects=" << num_objs
<< " shard max_objects=" << max_objs_per_shard * num_shards << dendl;
need_resharding = true;
if (suggested_num_shards) {
RGWQuotaHandler() {}
virtual ~RGWQuotaHandler() {
}
- virtual int check_quota(const rgw_user& bucket_owner, rgw_bucket& bucket,
+ virtual int check_quota(const DoutPrefixProvider *dpp, const rgw_user& bucket_owner, rgw_bucket& bucket,
RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota,
uint64_t num_objs, uint64_t size, optional_yield y) = 0;
- virtual void check_bucket_shards(uint64_t max_objs_per_shard, uint64_t num_shards,
+ virtual void check_bucket_shards(const DoutPrefixProvider *dpp, uint64_t max_objs_per_shard, uint64_t num_shards,
uint64_t num_objs, bool& need_resharding, uint32_t *suggested_num_shards) = 0;
virtual void update_stats(const rgw_user& bucket_owner, rgw_bucket& bucket, int obj_delta, uint64_t added_bytes, uint64_t removed_bytes) = 0;
}
}
-void RGWRados::wakeup_data_sync_shards(const rgw_zone_id& source_zone, map<int, set<string> >& shard_ids)
+void RGWRados::wakeup_data_sync_shards(const DoutPrefixProvider *dpp, const rgw_zone_id& source_zone, map<int, set<string> >& shard_ids)
{
- ldout(ctx(), 20) << __func__ << ": source_zone=" << source_zone << ", shard_ids=" << shard_ids << dendl;
+ ldpp_dout(dpp, 20) << __func__ << ": source_zone=" << source_zone << ", shard_ids=" << shard_ids << dendl;
std::lock_guard l{data_sync_thread_lock};
auto iter = data_sync_processor_threads.find(source_zone);
if (iter == data_sync_processor_threads.end()) {
- ldout(ctx(), 10) << __func__ << ": couldn't find sync thread for zone " << source_zone << ", skipping async data sync processing" << dendl;
+ ldpp_dout(dpp, 10) << __func__ << ": couldn't find sync thread for zone " << source_zone << ", skipping async data sync processing" << dendl;
return;
}
IoCtx ioctx;
int r = open_pool_ctx(dpp, pool, ioctx, false);
if (r < 0) {
- ldout(cct, 0) << "ERROR: open_pool_ctx() returned " << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: open_pool_ctx() returned " << r << dendl;
return r;
}
bool requires;
r = ioctx.pool_requires_alignment2(&requires);
if (r < 0) {
- ldout(cct, 0) << "ERROR: ioctx.pool_requires_alignment2() returned "
+ ldpp_dout(dpp, 0) << "ERROR: ioctx.pool_requires_alignment2() returned "
<< r << dendl;
return r;
}
uint64_t align;
r = ioctx.pool_required_alignment2(&align);
if (r < 0) {
- ldout(cct, 0) << "ERROR: ioctx.pool_required_alignment2() returned "
+ ldpp_dout(dpp, 0) << "ERROR: ioctx.pool_required_alignment2() returned "
<< r << dendl;
return r;
}
if (align != 0) {
- ldout(cct, 20) << "required alignment=" << align << dendl;
+ ldpp_dout(dpp, 20) << "required alignment=" << align << dendl;
}
*alignment = align;
return 0;
return ret;
}
-int RGWRados::register_to_service_map(const string& daemon_type, const map<string, string>& meta)
+int RGWRados::register_to_service_map(const DoutPrefixProvider *dpp, const string& daemon_type, const map<string, string>& meta)
{
string name = cct->_conf->name.get_id();
if (name.compare(0, 4, "rgw.") == 0) {
stringify(rados.get_instance_id()),
metadata);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: service_daemon_register() returned ret=" << ret << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: service_daemon_register() returned ret=" << ret << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
return 0;
}
-int RGWRados::update_service_map(std::map<std::string, std::string>&& status)
+int RGWRados::update_service_map(const DoutPrefixProvider *dpp, std::map<std::string, std::string>&& status)
{
int ret = rados.service_daemon_update_status(move(status));
if (ret < 0) {
- ldout(cct, 0) << "ERROR: service_daemon_update_status() returned ret=" << ret << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: service_daemon_update_status() returned ret=" << ret << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
return 0;
}
-int RGWRados::log_show_next(RGWAccessHandle handle, rgw_log_entry *entry)
+int RGWRados::log_show_next(const DoutPrefixProvider *dpp, RGWAccessHandle handle, rgw_log_entry *entry)
{
log_show_state *state = static_cast<log_show_state *>(handle);
off_t off = state->p.get_off();
- ldout(cct, 10) << "log_show_next pos " << state->pos << " bl " << state->bl.length()
+ ldpp_dout(dpp, 10) << "log_show_next pos " << state->pos << " bl " << state->bl.length()
<< " off " << off
<< " eof " << (int)state->eof
<< dendl;
state->p = state->bl.cbegin();
if ((unsigned)r < chunk)
state->eof = true;
- ldout(cct, 10) << " read " << r << dendl;
+ ldpp_dout(dpp, 10) << " read " << r << dendl;
}
if (state->p.end())
return ret;
}
-int RGWRados::decode_policy(bufferlist& bl, ACLOwner *owner)
+int RGWRados::decode_policy(const DoutPrefixProvider *dpp, bufferlist& bl, ACLOwner *owner)
{
auto i = bl.cbegin();
RGWAccessControlPolicy policy(cct);
try {
policy.decode_owner(i);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
return -EIO;
}
*owner = policy.get_owner();
auto& zonegroup_conn_map = svc.zone->get_zonegroup_conn_map();
map<string, RGWRESTConn *>::iterator iter = zonegroup_conn_map.find(src_bucket_info->zonegroup);
if (iter == zonegroup_conn_map.end()) {
- ldout(cct, 0) << "could not find zonegroup connection to zonegroup: " << source_zone << dendl;
+ ldpp_dout(dpp, 0) << "could not find zonegroup connection to zonegroup: " << source_zone << dendl;
return -ENOENT;
}
conn = iter->second;
auto& zone_conn_map = svc.zone->get_zone_conn_map();
auto iter = zone_conn_map.find(source_zone);
if (iter == zone_conn_map.end()) {
- ldout(cct, 0) << "could not find zone connection to zone: " << source_zone << dendl;
+ ldpp_dout(dpp, 0) << "could not find zone connection to zone: " << source_zone << dendl;
return -ENOENT;
}
conn = iter->second;
if (extra_data_bl.length()) {
JSONParser jp;
if (!jp.parse(extra_data_bl.c_str(), extra_data_bl.length())) {
- ldout(cct, 0) << "failed to parse response extra data. len=" << extra_data_bl.length() << " data=" << extra_data_bl.c_str() << dendl;
+ ldpp_dout(dpp, 0) << "failed to parse response extra data. len=" << extra_data_bl.length() << " data=" << extra_data_bl.c_str() << dendl;
return -EIO;
}
buf_to_hex(md5, CEPH_CRYPTO_MD5_DIGESTSIZE, md5_str);
tag.append(md5_str);
- ldout(store->ctx(), 10) << "generate_fake_tag new tag=" << tag << dendl;
+ ldpp_dout(dpp, 10) << "generate_fake_tag new tag=" << tag << dendl;
tag_bl.append(tag.c_str(), tag.size() + 1);
}
}
-int RGWRados::Object::Stat::wait()
+int RGWRados::Object::Stat::wait(const DoutPrefixProvider *dpp)
{
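+  // dpp is only forwarded to finish(), which logs through it if the stored
+  // manifest fails to decode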
if (!state.completion) {
return state.ret;
return state.ret;
}
- return finish();
+ return finish(dpp);
}
-int RGWRados::Object::Stat::finish()
+int RGWRados::Object::Stat::finish(const DoutPrefixProvider *dpp)
{
map<string, bufferlist>::iterator iter = result.attrs.find(RGW_ATTR_MANIFEST);
if (iter != result.attrs.end()) {
decode(*result.manifest, biter);
} catch (buffer::error& err) {
RGWRados *store = source->get_store();
- ldout(store->ctx(), 0) << "ERROR: " << __func__ << ": failed to decode manifest" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: " << __func__ << ": failed to decode manifest" << dendl;
return -EIO;
}
}
ACLOwner owner;
if (acl_bl && acl_bl->length()) {
- int ret = store->decode_policy(*acl_bl, &owner);
+ int ret = store->decode_policy(dpp, *acl_bl, &owner);
if (ret < 0) {
ldpp_dout(dpp, 0) << "WARNING: could not decode policy ret=" << ret << dendl;
}
return 0;
}
-static int decode_olh_info(CephContext* cct, const bufferlist& bl, RGWOLHInfo *olh)
+static int decode_olh_info(const DoutPrefixProvider *dpp, CephContext* cct, const bufferlist& bl, RGWOLHInfo *olh)
{
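+  // dpp is used solely on the error path below; successful decodes do not log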
try {
auto biter = bl.cbegin();
decode(*olh, biter);
return 0;
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: failed to decode olh info" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode olh info" << dendl;
return -EIO;
}
}
auto olh_info = state.attrset.find(RGW_ATTR_OLH_INFO);
if (olh_info != state.attrset.end()) {
RGWOLHInfo info;
- int r = decode_olh_info(cct, olh_info->second, &info);
+ int r = decode_olh_info(dpp, cct, olh_info->second, &info);
if (r < 0) {
return r;
}
return -EINVAL;
}
- return decode_olh_info(cct, iter->second, olh);
+ return decode_olh_info(dpp, cct, iter->second, olh);
}
-void RGWRados::check_pending_olh_entries(map<string, bufferlist>& pending_entries,
+void RGWRados::check_pending_olh_entries(const DoutPrefixProvider *dpp, map<string, bufferlist>& pending_entries,
map<string, bufferlist> *rm_pending_entries)
{
map<string, bufferlist>::iterator iter = pending_entries.begin();
decode(pending_info, biter);
} catch (buffer::error& err) {
/* skipping bad entry, we could remove it but it might hide a bug */
- ldout(cct, 0) << "ERROR: failed to decode pending entry " << iter->first << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode pending entry " << iter->first << dendl;
++iter;
continue;
}
rgw_filter_attrset(state->attrset, RGW_ATTR_OLH_PENDING_PREFIX, &pending_entries);
map<string, bufferlist> rm_pending_entries;
- check_pending_olh_entries(pending_entries, &rm_pending_entries);
+ check_pending_olh_entries(dpp, pending_entries, &rm_pending_entries);
if (!rm_pending_entries.empty()) {
int ret = remove_olh_pending_entries(dpp, bucket_info, *state, olh_obj, rm_pending_entries);
}
RGWOLHInfo olh;
- int ret = decode_olh_info(cct, iter->second, &olh);
+ int ret = decode_olh_info(dpp, cct, iter->second, &olh);
if (ret < 0) {
return ret;
}
return ctx.iter.get_cursor().to_str();
}
-static int do_pool_iterate(CephContext* cct, RGWPoolIterCtx& ctx, uint32_t num,
+static int do_pool_iterate(const DoutPrefixProvider *dpp, CephContext* cct, RGWPoolIterCtx& ctx, uint32_t num,
vector<rgw_bucket_dir_entry>& objs,
bool *is_truncated, RGWAccessListFilter *filter)
{
rgw_bucket_dir_entry e;
string oid = iter->get_oid();
- ldout(cct, 20) << "RGWRados::pool_iterate: got " << oid << dendl;
+ ldpp_dout(dpp, 20) << "RGWRados::pool_iterate: got " << oid << dendl;
// fill it in with initial values; we may correct later
if (filter && !filter->filter(oid, oid))
return objs.size();
}
-int RGWRados::pool_iterate(RGWPoolIterCtx& ctx, uint32_t num, vector<rgw_bucket_dir_entry>& objs,
+int RGWRados::pool_iterate(const DoutPrefixProvider *dpp, RGWPoolIterCtx& ctx, uint32_t num, vector<rgw_bucket_dir_entry>& objs,
bool *is_truncated, RGWAccessListFilter *filter)
{
// catch exceptions from NObjectIterator::operator++()
try {
- return do_pool_iterate(cct, ctx, num, objs, is_truncated, filter);
+ return do_pool_iterate(dpp, cct, ctx, num, objs, is_truncated, filter);
} catch (const std::system_error& e) {
int r = -e.code().value();
- ldout(cct, 10) << "NObjectIterator threw exception " << e.what()
+ ldpp_dout(dpp, 10) << "NObjectIterator threw exception " << e.what()
<< ", returning " << r << dendl;
return r;
} catch (const std::exception& e) {
- ldout(cct, 10) << "NObjectIterator threw exception " << e.what()
+ ldpp_dout(dpp, 10) << "NObjectIterator threw exception " << e.what()
<< ", returning -5" << dendl;
return -EIO;
}
}
RGWAccessListFilterPrefix filter(prefix_filter);
vector<rgw_bucket_dir_entry> objs;
- int r = pool_iterate(ctx.iter_ctx, max, objs, is_truncated, &filter);
+ int r = pool_iterate(dpp, ctx.iter_ctx, max, objs, is_truncated, &filter);
if (r < 0) {
if(r != -ENOENT)
ldpp_dout(dpp, 10) << "failed to list objects pool_iterate returned r=" << r << dendl;
return 0;
}
-int RGWRados::bi_remove(BucketShard& bs)
+int RGWRados::bi_remove(const DoutPrefixProvider *dpp, BucketShard& bs)
{
auto& ref = bs.bucket_obj.get_ref();
int ret = ref.pool.ioctx().remove(ref.obj.oid);
ret = 0;
}
if (ret < 0) {
- ldout(cct, 5) << "bs.index_ctx.remove(" << bs.bucket_obj << ") returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "bs.index_ctx.remove(" << bs.bucket_obj << ") returned ret=" << ret << dendl;
return ret;
}
}
iter = astate->attrset.find(RGW_ATTR_ACL);
if (iter != astate->attrset.end()) {
- r = decode_policy(iter->second, &owner);
+ r = decode_policy(dpp, iter->second, &owner);
if (r < 0) {
ldpp_dout(dpp, 0) << "WARNING: could not decode policy for object: " << obj << dendl;
}
const uint64_t max_objs_per_shard =
cct->_conf.get_val<uint64_t>("rgw_max_objs_per_shard");
- quota_handler->check_bucket_shards(max_objs_per_shard, num_source_shards,
+ quota_handler->check_bucket_shards(dpp, max_objs_per_shard, num_source_shards,
num_objs, need_resharding, &suggested_num_shards);
if (! need_resharding) {
return 0;
return reshard.add(dpp, entry);
}
-int RGWRados::check_quota(const rgw_user& bucket_owner, rgw_bucket& bucket,
+int RGWRados::check_quota(const DoutPrefixProvider *dpp, const rgw_user& bucket_owner, rgw_bucket& bucket,
RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota,
uint64_t obj_size, optional_yield y,
bool check_size_only)
{
// if we only check size, then num_objs will set to 0
if(check_size_only)
- return quota_handler->check_quota(bucket_owner, bucket, user_quota, bucket_quota, 0, obj_size, y);
+ return quota_handler->check_quota(dpp, bucket_owner, bucket, user_quota, bucket_quota, 0, obj_size, y);
- return quota_handler->check_quota(bucket_owner, bucket, user_quota, bucket_quota, 1, obj_size, y);
+ return quota_handler->check_quota(dpp, bucket_owner, bucket, user_quota, bucket_quota, 1, obj_size, y);
}
int RGWRados::get_target_shard_id(const rgw::bucket_index_normal_layout& layout, const string& obj_key,
int initialize(const DoutPrefixProvider *dpp);
void finalize();
- int register_to_service_map(const string& daemon_type, const map<string, string>& meta);
- int update_service_map(std::map<std::string, std::string>&& status);
+ int register_to_service_map(const DoutPrefixProvider *dpp, const string& daemon_type, const map<string, string>& meta);
+ int update_service_map(const DoutPrefixProvider *dpp, std::map<std::string, std::string>&& status);
/// list logs
int log_list_init(const DoutPrefixProvider *dpp, const string& prefix, RGWAccessHandle *handle);
/// show log
int log_show_init(const DoutPrefixProvider *dpp, const string& name, RGWAccessHandle *handle);
- int log_show_next(RGWAccessHandle handle, rgw_log_entry *entry);
+ int log_show_next(const DoutPrefixProvider *dpp, RGWAccessHandle handle, rgw_log_entry *entry);
// log bandwidth info
int log_usage(const DoutPrefixProvider *dpp, map<rgw_user_bucket, RGWUsageBatch>& usage_info);
explicit Stat(RGWRados::Object *_source) : source(_source) {}
int stat_async(const DoutPrefixProvider *dpp);
- int wait();
+ int wait(const DoutPrefixProvider *dpp);
int stat();
private:
- int finish();
+ int finish(const DoutPrefixProvider *dpp);
};
};
int delete_bucket(RGWBucketInfo& bucket_info, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp, bool check_empty = true);
void wakeup_meta_sync_shards(set<int>& shard_ids);
- void wakeup_data_sync_shards(const rgw_zone_id& source_zone, map<int, set<string> >& shard_ids);
+ void wakeup_data_sync_shards(const DoutPrefixProvider *dpp, const rgw_zone_id& source_zone, map<int, set<string> >& shard_ids);
RGWMetaSyncStatusManager* get_meta_sync_manager();
RGWDataSyncStatusManager* get_data_sync_manager(const rgw_zone_id& source_zone);
int unlink_obj_instance(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWBucketInfo& bucket_info, const rgw_obj& target_obj,
uint64_t olh_epoch, optional_yield y, rgw_zone_set *zones_trace = nullptr);
- void check_pending_olh_entries(map<string, bufferlist>& pending_entries, map<string, bufferlist> *rm_pending_entries);
+ void check_pending_olh_entries(const DoutPrefixProvider *dpp, map<string, bufferlist>& pending_entries, map<string, bufferlist> *rm_pending_entries);
int remove_olh_pending_entries(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, map<string, bufferlist>& pending_attrs);
int follow_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjectCtx& ctx, RGWObjState *state, const rgw_obj& olh_obj, rgw_obj *target);
int get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWOLHInfo *olh);
RGWObjectCtx *rctx = static_cast<RGWObjectCtx *>(ctx);
rctx->set_prefetch_data(obj);
}
- int decode_policy(bufferlist& bl, ACLOwner *owner);
+ int decode_policy(const DoutPrefixProvider *dpp, bufferlist& bl, ACLOwner *owner);
int get_bucket_stats(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, int shard_id, string *bucket_ver, string *master_ver,
map<RGWObjCategory, RGWStorageStats>& stats, string *max_marker, bool* syncstopped = NULL);
int get_bucket_stats_async(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, int shard_id, RGWGetBucketStats_CB *cb);
int bi_list(BucketShard& bs, const string& filter_obj, const string& marker, uint32_t max, list<rgw_cls_bi_entry> *entries, bool *is_truncated);
int bi_list(const DoutPrefixProvider *dpp, rgw_bucket& bucket, const string& obj_name, const string& marker, uint32_t max,
list<rgw_cls_bi_entry> *entries, bool *is_truncated);
- int bi_remove(BucketShard& bs);
+ int bi_remove(const DoutPrefixProvider *dpp, BucketShard& bs);
int cls_obj_usage_log_add(const DoutPrefixProvider *dpp, const string& oid, rgw_usage_log_info& info);
int cls_obj_usage_log_read(const DoutPrefixProvider *dpp, const string& oid, const string& user, const string& bucket, uint64_t start_epoch,
int fix_head_obj_locator(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, bool copy_obj, bool remove_bad, rgw_obj_key& key);
int fix_tail_obj_locator(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, rgw_obj_key& key, bool fix, bool *need_fix, optional_yield y);
- int check_quota(const rgw_user& bucket_owner, rgw_bucket& bucket,
+ int check_quota(const DoutPrefixProvider *dpp, const rgw_user& bucket_owner, rgw_bucket& bucket,
RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size,
optional_yield y, bool check_size_only = false);
* filter: if not NULL, will be used to filter returned objects
* Returns: 0 on success, -ERR# otherwise.
*/
- int pool_iterate(RGWPoolIterCtx& ctx, uint32_t num, vector<rgw_bucket_dir_entry>& objs,
+ int pool_iterate(const DoutPrefixProvider *dpp, RGWPoolIterCtx& ctx, uint32_t num, vector<rgw_bucket_dir_entry>& objs,
bool *is_truncated, RGWAccessListFilter *filter);
uint64_t next_bucket_id();
}
}
- int r = store->register_to_service_map("rgw", service_map_meta);
+ int r = store->register_to_service_map(&dp, "rgw", service_map_meta);
if (r < 0) {
ldpp_dout(&dp, -1) << "ERROR: failed to register to service map: " << cpp_strerror(-r) << dendl;
}
}
- store->wakeup_data_sync_shards(source_zone, updated_shards);
+ store->wakeup_data_sync_shards(this, source_zone, updated_shards);
op_ret = 0;
}
do {
list<string> keys;
left = (max_entries_specified ? max_entries - count : max);
- op_ret = store->meta_list_keys_next(handle, left, keys, &truncated);
+ op_ret = store->meta_list_keys_next(this, handle, left, keys, &truncated);
if (op_ret < 0) {
ldpp_dout(this, 5) << "ERROR: lists_keys_next(): " << cpp_strerror(op_ret)
<< dendl;
virtual int log_usage(const DoutPrefixProvider *dpp, map<rgw_user_bucket, RGWUsageBatch>& usage_info) = 0;
virtual int log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) = 0;
- virtual int register_to_service_map(const std::string& daemon_type,
+ virtual int register_to_service_map(const DoutPrefixProvider *dpp, const std::string& daemon_type,
const map<std::string, std::string>& meta) = 0;
virtual void get_quota(RGWQuotaInfo& bucket_quota, RGWQuotaInfo& user_quota) = 0;
virtual int set_buckets_enabled(const DoutPrefixProvider* dpp, vector<rgw_bucket>& buckets, bool enabled) = 0;
optional_yield y) = 0;
virtual RGWDataSyncStatusManager* get_data_sync_manager(const rgw_zone_id& source_zone) = 0;
virtual void wakeup_meta_sync_shards(set<int>& shard_ids) = 0;
- virtual void wakeup_data_sync_shards(const rgw_zone_id& source_zone, map<int, set<std::string> >& shard_ids) = 0;
+ virtual void wakeup_data_sync_shards(const DoutPrefixProvider *dpp, const rgw_zone_id& source_zone, map<int, set<std::string> >& shard_ids) = 0;
virtual int clear_usage(const DoutPrefixProvider *dpp) = 0;
virtual int read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool* is_truncated,
virtual int trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) = 0;
virtual int get_config_key_val(std::string name, bufferlist* bl) = 0;
virtual int meta_list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, const std::string& marker, void** phandle) = 0;
- virtual int meta_list_keys_next(void* handle, int max, list<std::string>& keys, bool* truncated) = 0;
+ virtual int meta_list_keys_next(const DoutPrefixProvider *dpp, void* handle, int max, list<std::string>& keys, bool* truncated) = 0;
virtual void meta_list_keys_complete(void* handle) = 0;
virtual std::string meta_get_marker(void* handle) = 0;
virtual int meta_remove(const DoutPrefixProvider* dpp, std::string& metadata_key, optional_yield y) = 0;
virtual User* get_owner(void) { return owner; };
virtual ACLOwner get_acl_owner(void) { return ACLOwner(info.owner); };
virtual int check_empty(const DoutPrefixProvider* dpp, optional_yield y) = 0;
- virtual int check_quota(RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) = 0;
+ virtual int check_quota(const DoutPrefixProvider *dpp, RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) = 0;
virtual int set_instance_attrs(const DoutPrefixProvider* dpp, Attrs& attrs, optional_yield y) = 0;
virtual int try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime) = 0;
virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
virtual ~StatOp() = default;
virtual int stat_async(const DoutPrefixProvider *dpp) = 0;
- virtual int wait() = 0;
+ virtual int wait(const DoutPrefixProvider *dpp) = 0;
};
Object()
return store->getRados()->check_bucket_empty(dpp, info, y);
}
-int RadosBucket::check_quota(RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size,
+int RadosBucket::check_quota(const DoutPrefixProvider *dpp, RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size,
optional_yield y, bool check_size_only)
{
- return store->getRados()->check_quota(owner->get_id(), get_key(),
+ return store->getRados()->check_quota(dpp, owner->get_id(), get_key(),
user_quota, bucket_quota, obj_size, y, check_size_only);
}
<< "): " << cpp_strerror(-ret) << std::endl;
return ret;
}
- ret = store->getRados()->bi_remove(bs);
+ ret = store->getRados()->bi_remove(dpp, bs);
if (ret < 0) {
cerr << "ERROR: failed to remove bucket index object: "
<< cpp_strerror(-ret) << std::endl;
return ret;
}
-int RadosStore::register_to_service_map(const std::string& daemon_type,
+int RadosStore::register_to_service_map(const DoutPrefixProvider *dpp, const std::string& daemon_type,
const map<std::string, std::string>& meta)
{
- return rados->register_to_service_map(daemon_type, meta);
+ return rados->register_to_service_map(dpp, daemon_type, meta);
}
void RadosStore::get_quota(RGWQuotaInfo& bucket_quota, RGWQuotaInfo& user_quota)
return ctl()->meta.mgr->list_keys_init(dpp, section, marker, phandle);
}
-int RadosStore::meta_list_keys_next(void* handle, int max, list<std::string>& keys, bool* truncated)
+int RadosStore::meta_list_keys_next(const DoutPrefixProvider *dpp, void* handle, int max, list<std::string>& keys, bool* truncated)
{
- return ctl()->meta.mgr->list_keys_next(handle, max, keys, truncated);
+ return ctl()->meta.mgr->list_keys_next(dpp, handle, max, keys, truncated);
}
void RadosStore::meta_list_keys_complete(void* handle)
return parent_op.stat_async(dpp);
}
-int RadosObject::RadosStatOp::wait()
+int RadosObject::RadosStatOp::wait(const DoutPrefixProvider *dpp)
{
result.obj = source;
- int ret = parent_op.wait();
+ int ret = parent_op.wait(dpp);
if (ret < 0)
return ret;
RadosStatOp(RadosObject* _source, RGWObjectCtx* _rctx);
virtual int stat_async(const DoutPrefixProvider *dpp) override;
- virtual int wait() override;
+ virtual int wait(const DoutPrefixProvider *dpp) override;
};
RadosObject() = default;
virtual int remove_metadata(const DoutPrefixProvider* dpp, RGWObjVersionTracker* objv, optional_yield y) override;
virtual bool is_owner(User* user) override;
virtual int check_empty(const DoutPrefixProvider* dpp, optional_yield y) override;
- virtual int check_quota(RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) override;
+ virtual int check_quota(const DoutPrefixProvider *dpp, RGWQuotaInfo& user_quota, RGWQuotaInfo& bucket_quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) override;
virtual int set_instance_attrs(const DoutPrefixProvider* dpp, Attrs& attrs, optional_yield y) override;
virtual int try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime) override;
virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
virtual int log_usage(const DoutPrefixProvider *dpp, map<rgw_user_bucket, RGWUsageBatch>& usage_info) override;
virtual int log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) override;
- virtual int register_to_service_map(const std::string& daemon_type,
+ virtual int register_to_service_map(const DoutPrefixProvider *dpp, const std::string& daemon_type,
const map<std::string, std::string>& meta) override;
virtual void get_quota(RGWQuotaInfo& bucket_quota, RGWQuotaInfo& user_quota) override;
virtual int set_buckets_enabled(const DoutPrefixProvider* dpp, vector<rgw_bucket>& buckets, bool enabled) override;
optional_yield y) override;
virtual RGWDataSyncStatusManager* get_data_sync_manager(const rgw_zone_id& source_zone) override;
virtual void wakeup_meta_sync_shards(set<int>& shard_ids) override { rados->wakeup_meta_sync_shards(shard_ids); }
- virtual void wakeup_data_sync_shards(const rgw_zone_id& source_zone, map<int, set<std::string> >& shard_ids) override { rados->wakeup_data_sync_shards(source_zone, shard_ids); }
+ virtual void wakeup_data_sync_shards(const DoutPrefixProvider *dpp, const rgw_zone_id& source_zone, map<int, set<std::string> >& shard_ids) override { rados->wakeup_data_sync_shards(dpp, source_zone, shard_ids); }
virtual int clear_usage(const DoutPrefixProvider *dpp) override { return rados->clear_usage(dpp); }
virtual int read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool* is_truncated,
virtual int trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) override;
virtual int get_config_key_val(std::string name, bufferlist* bl) override;
virtual int meta_list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, const std::string& marker, void** phandle) override;
- virtual int meta_list_keys_next(void* handle, int max, list<std::string>& keys, bool* truncated) override;
+ virtual int meta_list_keys_next(const DoutPrefixProvider *dpp, void* handle, int max, list<std::string>& keys, bool* truncated) override;
virtual void meta_list_keys_complete(void* handle) override;
virtual std::string meta_get_marker(void* handle) override;
virtual int meta_remove(const DoutPrefixProvider* dpp, std::string& metadata_key, optional_yield y) override;
{
map<string, string> status;
status["current_sync"] = manager->get_active_names();
- int ret = store->update_service_map(std::move(status));
+ int ret = store->update_service_map(dpp, std::move(status));
if (ret < 0) {
ldout(store->ctx(), 0) << "ERROR: update_service_map() returned ret=" << ret << dendl;
}
do {
// get the next key and marker
- r = mgr->list_keys_next(handle, 1, keys, &truncated);
+ r = mgr->list_keys_next(dpp, handle, 1, keys, &truncated);
if (r < 0) {
ldpp_dout(dpp, 10) << "failed to list metadata: "
<< cpp_strerror(r) << dendl;
auto g = make_scope_guard([=] { mgr->list_keys_complete(handle); });
do {
// get the next key and marker
- r = mgr->list_keys_next(handle, 1, keys, &truncated);
+ r = mgr->list_keys_next(dpp, handle, 1, keys, &truncated);
if (r < 0) {
ldpp_dout(dpp, 10) << "failed to list metadata: "
<< cpp_strerror(r) << dendl;
do {
std::list<std::string> keys;
left = op_state.max_entries - count;
- ret = store->meta_list_keys_next(handle, left, keys, &truncated);
+ ret = store->meta_list_keys_next(dpp, handle, left, keys, &truncated);
if (ret < 0 && ret != -ENOENT) {
return ret;
} if (ret != -ENOENT) {
int RGWSI_ConfigKey_RADOS::do_start(optional_yield, const DoutPrefixProvider *dpp)
{
- maybe_insecure_mon_conn = !svc.rados->check_secure_mon_conn();
+ maybe_insecure_mon_conn = !svc.rados->check_secure_mon_conn(dpp);
return 0;
}
optional_yield y) = 0;
virtual int list_init(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *ctx, const string& marker) = 0;
- virtual int list_next(RGWSI_MetaBackend::Context *ctx,
+ virtual int list_next(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *ctx,
int max, list<string> *keys,
bool *truncated) = 0;
virtual int list_get_marker(RGWSI_MetaBackend::Context *ctx,
int list_init(const DoutPrefixProvider *dpp, const string& marker) {
return be->list_init(dpp, be_ctx, marker);
}
- int list_next(int max, list<string> *keys,
+ int list_next(const DoutPrefixProvider *dpp, int max, list<string> *keys,
bool *truncated) {
- return be->list_next(be_ctx, max, keys, truncated);
+ return be->list_next(dpp, be_ctx, max, keys, truncated);
}
int list_get_marker(string *marker) {
return be->list_get_marker(be_ctx, marker);
return 0;
}
-int RGWSI_MetaBackend_SObj::list_next(RGWSI_MetaBackend::Context *_ctx,
+int RGWSI_MetaBackend_SObj::list_next(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *_ctx,
int max, list<string> *keys,
bool *truncated)
{
keys->clear();
- int ret = ctx->list.op->get_next(max, &oids, truncated);
+ int ret = ctx->list.op->get_next(dpp, max, &oids, truncated);
if (ret < 0 && ret != -ENOENT)
return ret;
if (ret == -ENOENT) {
optional_yield y) override;
int list_init(const DoutPrefixProvider *dpp, RGWSI_MetaBackend::Context *_ctx, const string& marker) override;
- int list_next(RGWSI_MetaBackend::Context *_ctx,
+ int list_next(const DoutPrefixProvider *dpp,
+ RGWSI_MetaBackend::Context *_ctx,
int max, list<string> *keys,
bool *truncated) override;
int list_get_marker(RGWSI_MetaBackend::Context *ctx,
params.mostly_omap);
}
-int RGWSI_RADOS::pool_iterate(librados::IoCtx& io_ctx,
+int RGWSI_RADOS::pool_iterate(const DoutPrefixProvider *dpp,
+ librados::IoCtx& io_ctx,
librados::NObjectIterator& iter,
uint32_t num, vector<rgw_bucket_dir_entry>& objs,
RGWAccessListFilter *filter,
rgw_bucket_dir_entry e;
string oid = iter->get_oid();
- ldout(cct, 20) << "RGWRados::pool_iterate: got " << oid << dendl;
+ ldpp_dout(dpp, 20) << "RGWRados::pool_iterate: got " << oid << dendl;
// fill it in with initial values; we may correct later
if (filter && !filter->filter(oid, oid))
return ref.pool.ioctx().get_last_version();
}
-int RGWSI_RADOS::Pool::create()
+int RGWSI_RADOS::Pool::create(const DoutPrefixProvider *dpp)
{
librados::Rados *rad = rados_svc->get_rados_handle();
int r = rad->pool_create(pool.name.c_str());
if (r < 0) {
- ldout(rados_svc->cct, 0) << "WARNING: pool_create returned " << r << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: pool_create returned " << r << dendl;
return r;
}
librados::IoCtx io_ctx;
r = rad->ioctx_create(pool.name.c_str(), io_ctx);
if (r < 0) {
- ldout(rados_svc->cct, 0) << "WARNING: ioctx_create returned " << r << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: ioctx_create returned " << r << dendl;
return r;
}
r = io_ctx.application_enable(pg_pool_t::APPLICATION_NAME_RGW, false);
if (r < 0) {
- ldout(rados_svc->cct, 0) << "WARNING: application_enable returned " << r << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: application_enable returned " << r << dendl;
return r;
}
return 0;
}
-int RGWSI_RADOS::Pool::create(const vector<rgw_pool>& pools, vector<int> *retcodes)
+int RGWSI_RADOS::Pool::create(const DoutPrefixProvider *dpp, const vector<rgw_pool>& pools, vector<int> *retcodes)
{
vector<librados::PoolAsyncCompletion *> completions;
vector<int> rets;
c->wait();
r = c->get_return_value();
if (r < 0) {
- ldout(rados_svc->cct, 0) << "WARNING: async pool_create returned " << r << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: async pool_create returned " << r << dendl;
error = true;
}
}
io_ctxs.emplace_back();
int ret = rad->ioctx_create(pool.name.c_str(), io_ctxs.back());
if (ret < 0) {
- ldout(rados_svc->cct, 0) << "WARNING: ioctx_create returned " << ret << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: ioctx_create returned " << ret << dendl;
error = true;
}
retcodes->push_back(ret);
if (ret == -EOPNOTSUPP) {
ret = 0;
} else if (ret < 0) {
- ldout(rados_svc->cct, 0) << "WARNING: async application_enable returned " << ret
+ ldpp_dout(dpp, 0) << "WARNING: async application_enable returned " << ret
<< dendl;
error = true;
}
return 0;
}
-int RGWSI_RADOS::Pool::List::get_next(int max,
+int RGWSI_RADOS::Pool::List::get_next(const DoutPrefixProvider *dpp,
+ int max,
std::vector<string> *oids,
bool *is_truncated)
{
return -EINVAL;
}
vector<rgw_bucket_dir_entry> objs;
- int r = pool->rados_svc->pool_iterate(ctx.ioctx, ctx.iter, max, objs, ctx.filter, is_truncated);
+ int r = pool->rados_svc->pool_iterate(dpp, ctx.ioctx, ctx.iter, max, objs, ctx.filter, is_truncated);
if (r < 0) {
if(r != -ENOENT) {
- ldout(pool->rados_svc->cct, 10) << "failed to list objects pool_iterate returned r=" << r << dendl;
+ ldpp_dout(dpp, 10) << "failed to list objects pool_iterate returned r=" << r << dendl;
}
return r;
}
return h.mon_command(cmd, inbl, nullptr, nullptr);
}
-bool RGWSI_RADOS::check_secure_mon_conn() const
+bool RGWSI_RADOS::check_secure_mon_conn(const DoutPrefixProvider *dpp) const
{
AuthRegistry reg(cct);
std::vector<uint32_t> modes;
reg.get_supported_methods(CEPH_ENTITY_TYPE_MON, &methods, &modes);
- ldout(cct, 20) << __func__ << "(): auth registy supported: methods=" << methods << " modes=" << modes << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): auth registy supported: methods=" << methods << " modes=" << modes << dendl;
for (auto method : methods) {
if (!reg.is_secure_method(method)) {
- ldout(cct, 20) << __func__ << "(): method " << method << " is insecure" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): method " << method << " is insecure" << dendl;
return false;
}
}
for (auto mode : modes) {
if (!reg.is_secure_mode(mode)) {
- ldout(cct, 20) << __func__ << "(): mode " << mode << " is insecure" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): mode " << mode << " is insecure" << dendl;
return false;
}
}
private:
int open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx,
const OpenParams& params = {});
- int pool_iterate(librados::IoCtx& ioctx,
+ int pool_iterate(const DoutPrefixProvider *dpp,
+ librados::IoCtx& ioctx,
librados::NObjectIterator& iter,
uint32_t num, vector<rgw_bucket_dir_entry>& objs,
RGWAccessListFilter *filter,
void shutdown() override;
uint64_t instance_id();
- bool check_secure_mon_conn() const;
+ bool check_secure_mon_conn(const DoutPrefixProvider *dpp) const;
RGWAsyncRadosProcessor *get_async_processor() {
return async_processor.get();
public:
Pool() {}
- int create();
- int create(const std::vector<rgw_pool>& pools, std::vector<int> *retcodes);
+ int create(const DoutPrefixProvider *dpp);
+ int create(const DoutPrefixProvider *dpp, const std::vector<rgw_pool>& pools, std::vector<int> *retcodes);
int lookup();
int open(const DoutPrefixProvider *dpp, const OpenParams& params = {});
List(Pool *_pool) : pool(_pool) {}
int init(const DoutPrefixProvider *dpp, const string& marker, RGWAccessListFilter *filter = nullptr);
- int get_next(int max,
+ int get_next(const DoutPrefixProvider *dpp, int max,
std::vector<string> *oids,
bool *is_truncated);
return source.core_svc->pool_list_objects_init(dpp, source.pool, marker, prefix, &ctx);
}
-int RGWSI_SysObj::Pool::Op::get_next(int max, vector<string> *oids, bool *is_truncated)
+int RGWSI_SysObj::Pool::Op::get_next(const DoutPrefixProvider *dpp, int max, vector<string> *oids, bool *is_truncated)
{
- return source.core_svc->pool_list_objects_next(ctx, max, oids, is_truncated);
+ return source.core_svc->pool_list_objects_next(dpp, ctx, max, oids, is_truncated);
}
int RGWSI_SysObj::Pool::Op::get_marker(string *marker)
Op(Pool& _source) : source(_source) {}
int init(const DoutPrefixProvider *dpp, const std::string& marker, const std::string& prefix);
- int get_next(int max, std::vector<string> *oids, bool *is_truncated);
+ int get_next(const DoutPrefixProvider *dpp, int max, std::vector<string> *oids, bool *is_truncated);
int get_marker(string *marker);
};
do {
vector<string> oids;
#define MAX_OBJS_DEFAULT 1000
- int r = op.get_next(MAX_OBJS_DEFAULT, &oids, &is_truncated);
+ int r = op.get_next(dpp, MAX_OBJS_DEFAULT, &oids, &is_truncated);
if (r < 0) {
return r;
}
return 0;
}
-int RGWSI_SysObj_Core::pool_list_objects_next(RGWSI_SysObj::Pool::ListCtx& _ctx,
+int RGWSI_SysObj_Core::pool_list_objects_next(const DoutPrefixProvider *dpp,
+ RGWSI_SysObj::Pool::ListCtx& _ctx,
int max,
vector<string> *oids,
bool *is_truncated)
return -EINVAL;
}
auto& ctx = static_cast<PoolListImplInfo&>(*_ctx.impl);
- int r = ctx.op.get_next(max, oids, is_truncated);
+ int r = ctx.op.get_next(dpp, max, oids, is_truncated);
if (r < 0) {
if(r != -ENOENT)
- ldout(cct, 10) << "failed to list objects pool_iterate returned r=" << r << dendl;
+ ldpp_dout(dpp, 10) << "failed to list objects pool_iterate returned r=" << r << dendl;
return r;
}
const std::string& marker,
const std::string& prefix,
RGWSI_SysObj::Pool::ListCtx *ctx);
- virtual int pool_list_objects_next(RGWSI_SysObj::Pool::ListCtx& ctx,
+ virtual int pool_list_objects_next(const DoutPrefixProvider *dpp,
+ RGWSI_SysObj::Pool::ListCtx& ctx,
int max,
vector<string> *oids,
bool *is_truncated);
pools.push_back(rgw_pool(s));
vector<int> retcodes;
bufferlist bl;
- ret = rados_svc->pool().create(pools, &retcodes);
+ ret = rados_svc->pool().create(dpp, pools, &retcodes);
if (ret < 0)
return ret;
ret = sysobj.omap().set(dpp, s, bl, y);
rados.emplace(g_ceph_context);
const NoDoutPrefix no_dpp(g_ceph_context, 1);
ASSERT_EQ(0, rados->start(null_yield, &no_dpp));
- int r = rados->pool({poolname}).create();
+ int r = rados->pool({poolname}).create(&no_dpp);
if (r == -EEXIST)
r = 0;
ASSERT_EQ(0, r);