rgw: convert remaining dout()/ldout()/lderr() calls to ldpp_dout() so log lines carry the request/op prefix (bucket create, ACL handling, multipart abort, object expirer, OIDC provider, orphan search, and the librgw request path), and drop the DoutPrefixProvider parameter from rgw_get_request_metadata() and RGWLibContinuedReq::exec_finish() now that callers no longer thread it through.
Signed-off-by: Kalpesh Pandya <kapandya@redhat.com>
*/
RGWOp *op = (req->op) ? req->op : dynamic_cast<RGWOp*>(req);
if (! op) {
- dout(1) << "failed to derive cognate RGWOp (invalid op?)" << dendl;
+ ldpp_dout(s, 1) << "failed to derive cognate RGWOp (invalid op?)" << dendl;
return -EINVAL;
}
/* XXX and -then- stash req_state pointers everywhere they are needed */
ret = req->init(rgw_env, &rados_ctx, io, s);
if (ret < 0) {
- dout(10) << "failed to initialize request" << dendl;
+ ldpp_dout(op, 10) << "failed to initialize request" << dendl;
abort_req(s, op, ret);
goto done;
}
ret = op->verify_permission(null_yield);
if (ret < 0) {
if (s->system_request) {
- dout(2) << "overriding permissions due to system operation" << dendl;
+ ldpp_dout(op, 2) << "overriding permissions due to system operation" << dendl;
} else if (s->auth.identity->is_admin_of(s->user->get_id())) {
- dout(2) << "overriding permissions due to admin operation" << dendl;
+ ldpp_dout(op, 2) << "overriding permissions due to admin operation" << dendl;
} else {
abort_req(s, op, ret);
goto done;
ldpp_dout(s, 2) << "http status=" << http_ret << dendl;
- dout(1) << "====== " << __func__
+ ldpp_dout(op, 1) << "====== " << __func__
<< " req done req=" << hex << req << dec << " http_status="
<< http_ret
<< " ======" << dendl;
*/
RGWOp *op = (req->op) ? req->op : dynamic_cast<RGWOp*>(req);
if (! op) {
- dout(1) << "failed to derive cognate RGWOp (invalid op?)" << dendl;
+ ldpp_dout(s, 1) << "failed to derive cognate RGWOp (invalid op?)" << dendl;
return -EINVAL;
}
int ret = req->init(rgw_env, &rados_ctx, &io_ctx, s);
if (ret < 0) {
- dout(10) << "failed to initialize request" << dendl;
+ ldpp_dout(op, 10) << "failed to initialize request" << dendl;
abort_req(s, op, ret);
goto done;
}
ret = op->verify_permission(null_yield);
if (ret < 0) {
if (s->system_request) {
- dout(2) << "overriding permissions due to system operation" << dendl;
+ ldpp_dout(op, 2) << "overriding permissions due to system operation" << dendl;
} else if (s->auth.identity->is_admin_of(s->user->get_id())) {
- dout(2) << "overriding permissions due to admin operation" << dendl;
+ ldpp_dout(op, 2) << "overriding permissions due to admin operation" << dendl;
} else {
abort_req(s, op, ret);
goto done;
return -EINVAL;
}
- int ret = req->exec_finish(op);
+ int ret = req->exec_finish();
int op_ret = op->get_ret();
ldpp_dout(op, 1) << "====== " << __func__
dest_owner.set_id(owner->get_id());
dest_owner.set_name(owner_info.display_name);
- ldout(cct, 20) << "owner id=" << owner->get_id() << dendl;
- ldout(cct, 20) << "dest owner id=" << dest.get_owner().get_id() << dendl;
+ ldpp_dout(dpp, 20) << "owner id=" << owner->get_id() << dendl;
+ ldpp_dout(dpp, 20) << "dest owner id=" << dest.get_owner().get_id() << dendl;
RGWAccessControlList& dst_acl = dest.get_acl();
string email;
rgw_user u;
if (!src_grant.get_id(u)) {
- ldout(cct, 0) << "ERROR: src_grant.get_id() failed" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: src_grant.get_id() failed" << dendl;
return -EINVAL;
}
email = u.id;
{
if (type.get_type() == ACL_TYPE_CANON_USER) {
if (!src_grant.get_id(uid)) {
- ldout(cct, 0) << "ERROR: src_grant.get_id() failed" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: src_grant.get_id() failed" << dendl;
err_msg = "Invalid id";
return -EINVAL;
}
grant_ok = true;
rgw_user new_id;
new_grant.get_id(new_id);
- ldout(cct, 10) << "new grant: " << new_id << ":" << grant_user.display_name << dendl;
+ ldpp_dout(dpp, 10) << "new grant: " << new_id << ":" << grant_user.display_name << dendl;
}
}
break;
if (ACLGrant_S3::group_to_uri(src_grant.get_group(), uri)) {
new_grant = src_grant;
grant_ok = true;
- ldout(cct, 10) << "new grant: " << uri << dendl;
+ ldpp_dout(dpp, 10) << "new grant: " << uri << dendl;
} else {
- ldout(cct, 10) << "bad grant group:" << (int)src_grant.get_group() << dendl;
+ ldpp_dout(dpp, 10) << "bad grant group:" << (int)src_grant.get_group() << dendl;
err_msg = "Invalid group uri";
return -EINVAL;
}
{
for (const auto& uid : uids) {
boost::optional<ACLGrant> grant;
- ldout(cct, 20) << "trying to add grant for ACL uid=" << uid << dendl;
+ ldpp_dout(dpp, 20) << "trying to add grant for ACL uid=" << uid << dendl;
/* Let's check whether the item has a separator potentially indicating
* a special meaning (like an HTTP referral-based grant). */
std::vector<std::string> uids;
int r = parse_list(read_list, uids);
if (r < 0) {
- ldout(cct, 0) << "ERROR: parse_list for read returned r="
+ ldpp_dout(dpp, 0) << "ERROR: parse_list for read returned r="
<< r << dendl;
return r;
}
std::vector<std::string> uids;
int r = parse_list(write_list, uids);
if (r < 0) {
- ldout(cct, 0) << "ERROR: parse_list for write returned r="
+ ldpp_dout(dpp, 0) << "ERROR: parse_list for write returned r="
<< r << dendl;
return r;
}
JSONParser parser;
if (!parser.parse(acl_str.c_str(), acl_str.length())) {
- ldout(cct, 0) << "ERROR: JSONParser::parse returned error=" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: JSONParser::parse returned error" << dendl;
return false;
}
auto biter = bl.cbegin();
decode(manifest, biter);
} catch (buffer::error& err) {
- ldout(store->ctx(), 0) << "ERROR: failed to decode manifest" << dendl;
+ ldpp_dout(dpp(), 0) << "ERROR: failed to decode manifest" << dendl;
return -EIO;
}
rgw_bucket hint_bucket;
int ret = init_bucket(b, hint_bucket_info, hint_bucket);
if (ret < 0) {
- ldout(store->ctx(), 20) << "could not init bucket info for hint bucket=" << b << " ... skipping" << dendl;
+ ldpp_dout(dpp(), 20) << "could not init bucket info for hint bucket=" << b << " ... skipping" << dendl;
continue;
}
RGWBucketSyncPolicyHandlerRef hint_bucket_handler;
int r = store->ctl()->bucket->get_sync_policy_handler(zid, hint_bucket, &hint_bucket_handler, null_yield, dpp());
if (r < 0) {
- ldout(store->ctx(), 20) << "could not get bucket sync policy handler for hint bucket=" << hint_bucket << " ... skipping" << dendl;
+ ldpp_dout(dpp(), 20) << "could not get bucket sync policy handler for hint bucket=" << hint_bucket << " ... skipping" << dendl;
continue;
}
pins[1].c_str());
if (rc != OATH_INVALID_OTP) {
*pofs = time_ofs - step_size + step_size * totp.window / 2;
- ldout(cct, 20) << "found at time=" << start_time - time_ofs << " time_ofs=" << time_ofs << dendl;
+ ldpp_dout(dpp(), 20) << "found at time=" << start_time - time_ofs << " time_ofs=" << time_ofs << dendl;
return 0;
}
}
if (min_rewrite_stripe_size > 0) {
ret = check_min_obj_stripe_size(store, bucket_info, &obj, min_rewrite_stripe_size, &need_rewrite);
if (ret < 0) {
- ldout(store->ctx(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << ret << dendl;
+ ldpp_dout(dpp(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << ret << dendl;
}
}
if (need_rewrite) {
return -ret;
}
} else {
- ldout(store->ctx(), 20) << "skipped object" << dendl;
+ ldpp_dout(dpp(), 20) << "skipped object" << dendl;
}
}
if (min_rewrite_stripe_size > 0) {
r = check_min_obj_stripe_size(store, bucket_info, &obj, min_rewrite_stripe_size, &need_rewrite);
if (r < 0) {
- ldout(store->ctx(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << r << dendl;
+ ldpp_dout(dpp(), 0) << "WARNING: check_min_obj_stripe_size failed, r=" << r << dendl;
}
}
if (!need_rewrite) {
continue;
}
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: get obj state returned with error " << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: get obj state returned with error " << ret << dendl;
return ret;
}
map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_ACL);
if (aiter == attrs.end()) {
// should never happen; only pre-argonaut buckets lacked this.
- ldout(store->ctx(), 0) << "WARNING: can't bucket link because no acl on bucket=" << old_bucket.name << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: can't bucket link because no acl on bucket=" << old_bucket.name << dendl;
set_err_msg(err_msg,
"While crossing the Anavros you have displeased the goddess Hera."
" You must sacrifice your ancient bucket " + bucket.bucket_id);
// now update the user for the bucket...
if (display_name.empty()) {
- ldout(store->ctx(), 0) << "WARNING: user " << user_info.user_id << " has no display name set" << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: user " << user_info.user_id << " has no display name set" << dendl;
}
RGWAccessControlPolicy policy_instance;
std::make_move_iterator(other_instances.end()));
} else {
// all bets are off if we can't read the bucket, just return the sureshot stale instances
- lderr(store->ctx()) << "error: reading bucket info for bucket: "
+ ldpp_dout(dpp, -1) << "error: reading bucket info for bucket: "
<< bucket << cpp_strerror(-r) << dendl;
}
return;
RGWFormatterFlusher& flusher, bool dry_run)
{
if (bucket_info.bucket.bucket_id == bucket_info.bucket.marker) {
- lderr(store->ctx()) << "Not a resharded bucket skipping" << dendl;
+ ldpp_dout(dpp, -1) << "Not a resharded bucket skipping" << dendl;
return 0; // not a resharded bucket, move along
}
RGWSI_Bucket_EP_Ctx ctx(op->ctx());
- ldout(cct, 5) << "SKIP: bucket removal is not allowed on archive zone: bucket:" << entry << " ... proceeding to rename" << dendl;
+ ldpp_dout(dpp, 5) << "SKIP: bucket removal is not allowed on archive zone: bucket:" << entry << " ... proceeding to rename" << dendl;
string tenant_name, bucket_name;
parse_bucket(entry, &tenant_name, &bucket_name);
.set_attrs(&attrs_m)
.set_orig_info(&old_bi));
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed to put new bucket instance info for bucket=" << new_bi.bucket << " ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to put new bucket instance info for bucket=" << new_bi.bucket << " ret=" << ret << dendl;
return ret;
}
RGWBucketInfo info;
auto cct = svc.bucket->ctx();
- ldout(cct, 10) << "RGWRados::convert_old_bucket_info(): bucket=" << bucket << dendl;
+ ldpp_dout(dpp, 10) << "RGWRados::convert_old_bucket_info(): bucket=" << bucket << dendl;
int ret = svc.bucket->read_bucket_entrypoint_info(ctx.ep,
RGWSI_Bucket::get_entrypoint_meta_key(bucket),
{
int ret = ctl.user->remove_bucket(user_id, bucket, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: error removing bucket from directory: "
+ ldpp_dout(dpp, 0) << "ERROR: error removing bucket from directory: "
<< cpp_strerror(-ret)<< dendl;
}
return 0;
if (ep.owner != user_id) {
- ldout(cct, 0) << "bucket entry point user mismatch, can't unlink bucket: " << ep.owner << " != " << user_id << dendl;
+ ldpp_dout(dpp, 0) << "bucket entry point user mismatch, can't unlink bucket: " << ep.owner << " != " << user_id << dendl;
return -EINVAL;
}
}
const auto& aiter = attrs.find(RGW_ATTR_ACL);
if (aiter == attrs.end()) {
- ldout(store->ctx(), 0) << "ERROR: no acls found for object " << obj.key.name << " .Continuing with next object." << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: no acls found for object " << obj.key.name << " .Continuing with next object." << dendl;
continue;
} else {
bufferlist& bl = aiter->second;
decode(policy, bl);
owner = policy.get_owner();
} catch (buffer::error& err) {
- ldout(store->ctx(), 0) << "ERROR: decode policy failed" << err.what()
+ ldpp_dout(dpp, 0) << "ERROR: decode policy failed" << err.what()
<< dendl;
return -EIO;
}
RGWBucketCtl::BucketInstance::GetParams().set_attrs(&attrs));
}
if (r < 0) {
- ldout(store->ctx(), 0) << "ERROR: failed to get bucket instance info for "
+ ldpp_dout(dpp, 0) << "ERROR: failed to get bucket instance info for "
<< bucket << dendl;
return r;
}
int ret = store->getRados()->get_obj_state(dpp, &obj_ctx, bucket_info, obj, &state, null_yield);
if (ret < 0) {
- ldout(store->ctx(), 20) << __func__ << "(): get_obj_state() obj=" << obj << " returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): get_obj_state() obj=" << obj << " returned ret=" << ret << dendl;
return ret;
}
/* has there been any racing object write? */
if (del_if_older && (state->mtime > timestamp)) {
- ldout(store->ctx(), 20) << __func__ << "(): skipping object removal obj=" << obj << " (obj mtime=" << state->mtime << ", request timestamp=" << timestamp << ")" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): skipping object removal obj=" << obj << " (obj mtime=" << state->mtime << ", request timestamp=" << timestamp << ")" << dendl;
return 0;
}
try {
policy.decode(bliter);
} catch (buffer::error& err) {
- ldout(store->ctx(), 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
return -EIO;
}
}
ret = del_op.delete_obj(null_yield, dpp);
if (ret < 0) {
- ldout(store->ctx(), 20) << __func__ << "(): delete_obj() obj=" << obj << " returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 20) << __func__ << "(): delete_obj() obj=" << obj << " returned ret=" << ret << dendl;
}
return ret;
}
if (existed) {
if (info.owner != user) {
- ldout(cct, 20) << "NOTICE: bucket already exists under a different user (bucket=" << bucket << " user=" << user << " bucket_owner=" << info.owner << dendl;
+ ldpp_dout(dpp, 20) << "NOTICE: bucket already exists under a different user (bucket=" << bucket << " user=" << user << " bucket_owner=" << info.owner << dendl;
return -EEXIST;
}
bucket = info.bucket;
/* if it exists (or previously existed), don't remove it! */
int r = store->ctl()->bucket->unlink_bucket(user, bucket, null_yield, dpp);
if (r < 0) {
- ldout(cct, 0) << "WARNING: failed to unlink bucket: ret=" << r << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: failed to unlink bucket: ret=" << r << dendl;
}
} else if (ret == -EEXIST || (ret == 0 && existed)) {
ret = -ERR_BUCKET_EXISTS;
}
if (ret < 0) {
- ldout(cct, 0) << "ERROR: bucket creation (bucket=" << bucket << ") return ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: bucket creation (bucket=" << bucket << ") return ret=" << ret << dendl;
}
return ret;
ret = obj->put(params.data, params.attrs, dpp, null_yield);
if (ret < 0) {
- lderr(cct) << "ERROR: put object returned error: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: put object returned error: " << cpp_strerror(-ret) << dendl;
}
return 0;
null_yield,
dpp);
if (r < 0) {
- lderr(cct) << "ERROR: " << __func__ << "(): get_sync_policy_handler() returned " << r << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: " << __func__ << "(): get_sync_policy_handler() returned " << r << dendl;
return r;
}
sync_pipe.dest_bucket_info.flags = (sync_pipe.dest_bucket_info.flags & ~BUCKET_VERSIONS_SUSPENDED) | BUCKET_VERSIONED;
int op_ret = sync_env->store->getRados()->put_bucket_instance_info(sync_pipe.dest_bucket_info, false, real_time(), NULL, sync_env->dpp);
if (op_ret < 0) {
- ldout(sc->cct, 0) << "SYNC_ARCHIVE: sync_object: error versioning archive bucket" << dendl;
+ ldpp_dout(sync_env->dpp, 0) << "SYNC_ARCHIVE: sync_object: error versioning archive bucket" << dendl;
return NULL;
}
}
std::unique_lock sl(status->lock);
- ldout(cct, 20) << "RGWDataChangesLog::add_entry() bucket.name=" << bucket.name
+ ldpp_dout(dpp, 20) << "RGWDataChangesLog::add_entry() bucket.name=" << bucket.name
<< " shard_id=" << shard_id << " now=" << now
<< " cur_expiration=" << status->cur_expiration << dendl;
change.timestamp = now;
encode(change, bl);
- ldout(cct, 20) << "RGWDataChangesLog::add_entry() sending update with now=" << now << " cur_expiration=" << expiration << dendl;
+ ldpp_dout(dpp, 20) << "RGWDataChangesLog::add_entry() sending update with now=" << now << " cur_expiration=" << expiration << dendl;
ret = be->push(index, now, change.key, std::move(bl));
return op_ret;
} /* exec_continue */
- int RGWWriteRequest::exec_finish(const DoutPrefixProvider *dpp)
+ int RGWWriteRequest::exec_finish()
{
buffer::list bl, aclbl, ux_key, ux_attrs;
map<string, string>::iterator iter;
cs_info.blocks = std::move(compressor->get_compression_blocks());
encode(cs_info, tmp);
attrs[RGW_ATTR_COMPRESSION] = tmp;
- ldout(state->cct, 20) << "storing " << RGW_ATTR_COMPRESSION
+ ldpp_dout(this, 20) << "storing " << RGW_ATTR_COMPRESSION
<< " with type=" << cs_info.compression_type
<< ", orig_size=" << cs_info.orig_size
<< ", blocks=" << cs_info.blocks.size() << dendl;
attrbl.append(val.c_str(), val.size() + 1);
}
- op_ret = rgw_get_request_metadata(dpp, state->cct, state->info, attrs);
+ op_ret = rgw_get_request_metadata(state->cct, state->info, attrs);
if (op_ret < 0) {
goto done;
}
int exec_start() override;
int exec_continue() override;
- int exec_finish(const DoutPrefixProvider *dpp) override;
+ int exec_finish() override;
void send_response() override {}
virtual int execute() final { ceph_abort(); }
virtual int exec_start() = 0;
virtual int exec_continue() = 0;
- virtual int exec_finish(const DoutPrefixProvider *dpp) = 0;
+ virtual int exec_finish() = 0;
}; /* RGWLibContinuedReq */
mp_obj.get_upload_id(), mp_obj.get_meta(),
1000, marker, obj_parts, &marker, &truncated);
if (ret < 0) {
- ldout(cct, 20) << __func__ << ": list_multipart_parts returned " <<
+ ldpp_dout(dpp, 20) << __func__ << ": list_multipart_parts returned " <<
ret << dendl;
return (ret == -ENOENT) ? -ERR_NO_SUCH_UPLOAD : ret;
}
/* use upload id as tag and do it synchronously */
ret = store->getRados()->send_chain_to_gc(chain, mp_obj.get_upload_id());
if (ret < 0) {
- ldout(cct, 5) << __func__ << ": gc->send_chain() returned " << ret << dendl;
+ ldpp_dout(dpp, 5) << __func__ << ": gc->send_chain() returned " << ret << dendl;
if (ret == -ENOENT) {
return -ERR_NO_SUCH_UPLOAD;
}
// and also remove the metadata obj
ret = del_op.delete_obj(null_yield, dpp);
if (ret < 0) {
- ldout(cct, 20) << __func__ << ": del_op.delete_obj returned " <<
+ ldpp_dout(dpp, 20) << __func__ << ": del_op.delete_obj returned " <<
ret << dendl;
}
return (ret == -ENOENT) ? -ERR_NO_SUCH_UPLOAD : ret;
ret = list_bucket_multiparts(dpp, store, bucket_info, prefix, marker, delim,
max, &objs, nullptr, &is_truncated);
if (ret < 0) {
- ldout(store->ctx(), 0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
" ERROR : calling list_bucket_multiparts; ret=" << ret <<
"; bucket=\"" << bucket_info.bucket << "\"; prefix=\"" <<
prefix << "\"; delim=\"" << delim << "\"" << dendl;
return ret;
}
- ldout(store->ctx(), 20) << __func__ <<
+ ldpp_dout(dpp, 20) << __func__ <<
" INFO: aborting and cleaning up multipart upload(s); bucket=\"" <<
bucket_info.bucket << "\"; objs.size()=" << objs.size() <<
"; is_truncated=" << is_truncated << dendl;
// we're doing a best-effort; if something cannot be found,
// log it and keep moving forward
if (ret != -ENOENT && ret != -ERR_NO_SUCH_UPLOAD) {
- ldout(store->ctx(), 0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
" ERROR : failed to abort and clean-up multipart upload \"" <<
key.get_oid() << "\"" << dendl;
return ret;
} else {
- ldout(store->ctx(), 10) << __func__ <<
+ ldpp_dout(dpp, 10) << __func__ <<
" NOTE : unable to find part(s) of "
"aborted multipart upload of \"" << key.get_oid() <<
"\" for cleaning up" << dendl;
num_deleted++;
}
if (num_deleted) {
- ldout(store->ctx(), 0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
" WARNING : aborted " << num_deleted <<
" incomplete multipart uploads" << dendl;
}
int ret = init_bucket_info(hint.tenant, hint.bucket_name,
hint.bucket_id, bucket_info);
if (-ENOENT == ret) {
- ldout(store->ctx(), 15) << "NOTICE: cannot find bucket = " \
+ ldpp_dout(dpp, 15) << "NOTICE: cannot find bucket = " \
<< hint.bucket_name << ". The object must be already removed" << dendl;
return -ERR_PRECONDITION_FAILED;
} else if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: could not init bucket = " \
+ ldpp_dout(dpp, 1) << "ERROR: could not init bucket = " \
<< hint.bucket_name << "due to ret = " << ret << dendl;
return ret;
}
++iter)
{
objexp_hint_entry hint;
- ldout(store->ctx(), 15) << "got removal hint for: " << iter->key_ts.sec() \
+ ldpp_dout(dpp, 15) << "got removal hint for: " << iter->key_ts.sec() \
<< " - " << iter->key_ext << dendl;
int ret = objexp_hint_parse(store->getRados()->ctx(), *iter, &hint);
if (ret < 0) {
- ldout(store->ctx(), 1) << "cannot parse removal hint for " << hint.obj_key << dendl;
+ ldpp_dout(dpp, 1) << "cannot parse removal hint for " << hint.obj_key << dendl;
continue;
}
* We can silently ignore that and move forward. */
ret = garbage_single_object(dpp, hint);
if (ret == -ERR_PRECONDITION_FAILED) {
- ldout(store->ctx(), 15) << "not actual hint for object: " << hint.obj_key << dendl;
+ ldpp_dout(dpp, 15) << "not actual hint for object: " << hint.obj_key << dendl;
} else if (ret < 0) {
- ldout(store->ctx(), 1) << "cannot remove expired object: " << hint.obj_key << dendl;
+ ldpp_dout(dpp, 1) << "cannot remove expired object: " << hint.obj_key << dendl;
}
need_trim = true;
int ret = l.lock_exclusive(&store->getRados()->objexp_pool_ctx, shard);
if (ret == -EBUSY) { /* already locked by another processor */
- dout(5) << __func__ << "(): failed to acquire lock on " << shard << dendl;
+ ldpp_dout(dpp, 5) << __func__ << "(): failed to acquire lock on " << shard << dendl;
return false;
}
num_entries, marker, entries,
&out_marker, &truncated);
if (ret < 0) {
- ldout(cct, 10) << "cannot get removal hints from shard: " << shard
+ ldpp_dout(dpp, 10) << "cannot get removal hints from shard: " << shard
<< dendl;
continue;
}
string shard;
objexp_get_shard(i, &shard);
- ldout(store->ctx(), 20) << "processing shard = " << shard << dendl;
+ ldpp_dout(dpp, 20) << "processing shard = " << shard << dendl;
if (! process_single_shard(dpp, shard, last_run, round_start)) {
all_done = false;
/* check to see the name is not used */
ret = read_url(dpp, idp_url, tenant);
if (exclusive && ret == 0) {
- ldout(cct, 0) << "ERROR: url " << provider_url << " already in use"
+ ldpp_dout(dpp, 0) << "ERROR: url " << provider_url << " already in use"
<< id << dendl;
return -EEXIST;
} else if ( ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "failed reading provider url " << provider_url << ": "
+ ldpp_dout(dpp, 0) << "failed reading provider url " << provider_url << ": "
<< cpp_strerror(-ret) << dendl;
return ret;
}
auto& pool = svc->zone->get_zone_params().oidc_pool;
ret = store_url(idp_url, exclusive, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: storing role info in pool: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: storing role info in pool: " << pool.name << ": "
<< provider_url << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
string url, tenant;
auto ret = get_tenant_url_from_arn(tenant, url);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed to parse arn" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to parse arn" << dendl;
return -EINVAL;
}
if (this->tenant != tenant) {
- ldout(cct, 0) << "ERROR: tenant in arn doesn't match that of user " << this->tenant << ", "
+ ldpp_dout(dpp, 0) << "ERROR: tenant in arn doesn't match that of user " << this->tenant << ", "
<< tenant << ": " << dendl;
return -EINVAL;
}
auto iter = bl.cbegin();
decode(*this, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: failed to decode oidc provider info from pool: " << pool.name <<
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode oidc provider info from pool: " << pool.name <<
": " << url << dendl;
return -EIO;
}
list<string> oids;
int r = store->list_raw_objects(pool, prefix, 1000, ctx, oids, &is_truncated);
if (r < 0) {
- ldout(ctl->cct, 0) << "ERROR: listing filtered objects failed: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: listing filtered objects failed: " << pool.name << ": "
<< prefix << ": " << cpp_strerror(-r) << dendl;
return r;
}
auto iter = bl.cbegin();
decode(provider, iter);
} catch (buffer::error& err) {
- ldout(ctl->cct, 0) << "ERROR: failed to decode oidc provider info from pool: " << pool.name <<
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode oidc provider info from pool: " << pool.name <<
": " << iter << dendl;
return -EIO;
}
if (ret < 0)
return ret;
} else {
- ldout(cct, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
std::unique_ptr<rgw::sal::RGWUser> user = store->get_user(bucket_info.owner);
/* object exists, but policy is broken */
int r = user->load_by_id(dpp, y);
return ret;
} else if (ret == -ENODATA) {
/* object exists, but policy is broken */
- ldout(cct, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: couldn't find acl header for object, generating default" << dendl;
std::unique_ptr<rgw::sal::RGWUser> user = store->get_user(bucket_info.owner);
ret = user->load_by_id(dpp, y);
if (ret < 0)
if (need_metadata_upload()) {
/* It's supposed that following functions WILL NOT change any special
* attributes (like RGW_ATTR_ACL) if they are already present in attrs. */
- op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false);
+ op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
if (op_ret < 0) {
return;
}
attrs.clear();
- op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false);
+ op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
if (op_ret < 0) {
return;
}
emplace_attr(RGW_ATTR_ETAG, std::move(bl));
populate_with_generic_attrs(s, attrs);
- op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs);
+ op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
if (op_ret < 0) {
return;
}
attrs.emplace(RGW_ATTR_ACL, std::move(acl_bl));
}
- op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false);
+ op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
if (op_ret < 0) {
return op_ret;
}
return;
}
- op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs, false);
+ op_ret = rgw_get_request_metadata(s->cct, s->info, attrs, false);
if (op_ret < 0) {
return;
}
return;
}
- op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs);
+ op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
if (op_ret < 0) {
return;
}
dest_policy.encode(aclbl);
emplace_attr(RGW_ATTR_ACL, std::move(aclbl));
- op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs);
+ op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
if (op_ret < 0) {
return op_ret;
}
if (op_ret != 0)
return;
- op_ret = rgw_get_request_metadata(this, s->cct, s->info, attrs);
+ op_ret = rgw_get_request_metadata(s->cct, s->info, attrs);
if (op_ret < 0) {
return;
}
* On failure returns a negative error code.
*
*/
-inline int rgw_get_request_metadata(const DoutPrefixProvider *dpp,
- CephContext* const cct,
+inline int rgw_get_request_metadata(CephContext* const cct,
struct req_info& info,
std::map<std::string, ceph::bufferlist>& attrs,
const bool allow_empty_attrs = true)
std::string& xattr = kv.second;
if (blocklisted_headers.count(name) == 1) {
- ldpp_dout(dpp, 10) << "skipping x>> " << name << dendl;
+ lsubdout(cct, rgw, 10) << "skipping x>> " << name << dendl;
continue;
} else if (allow_empty_attrs || !xattr.empty()) {
- ldpp_dout(dpp, 10) << "x>> " << name << ":" << xattr << dendl;
+ lsubdout(cct, rgw, 10) << "x>> " << name << ":" << xattr << dendl;
format_xattr(xattr);
std::string attr_name(RGW_ATTR_PREFIX);
int ret = rgw_bucket_parse_bucket_key(store->ctx(), bucket_instance_id,
&orphan_bucket, &shard_id);
if (ret < 0) {
- ldout(store->ctx(),0) << __func__ << " failed to parse bucket instance: "
+ ldpp_dout(dpp, 0) << __func__ << " failed to parse bucket instance: "
<< bucket_instance_id << " skipping" << dendl;
return ret;
}
/* probably raced with bucket removal */
return 0;
}
- lderr(store->ctx()) << __func__ << ": ERROR: RGWRados::get_bucket_instance_info() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: RGWRados::get_bucket_instance_info() returned ret=" << ret << dendl;
return ret;
}
if (cur_bucket_info.bucket.bucket_id != orphan_bucket.bucket_id) {
- ldout(store->ctx(), 0) << __func__ << ": Skipping stale bucket instance: "
+ ldpp_dout(dpp, 0) << __func__ << ": Skipping stale bucket instance: "
<< orphan_bucket.name << ": "
<< orphan_bucket.bucket_id << dendl;
return 0;
}
if (cur_bucket_info.reshard_status == cls_rgw_reshard_status::IN_PROGRESS) {
- ldout(store->ctx(), 0) << __func__ << ": reshard in progress. Skipping "
+ ldpp_dout(dpp, 0) << __func__ << ": reshard in progress. Skipping "
<< orphan_bucket.name << ": "
<< orphan_bucket.bucket_id << dendl;
return 0;
/* probably raced with bucket removal */
return 0;
}
- lderr(store->ctx()) << __func__ << ": ERROR: RGWRados::get_bucket_instance_info() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, -1) << __func__ << ": ERROR: RGWRados::get_bucket_instance_info() returned ret=" << ret << dendl;
return ret;
}
- ldout(store->ctx(), 10) << "building linked oids for bucket instance: " << bucket_instance_id << dendl;
+ ldpp_dout(dpp, 10) << "building linked oids for bucket instance: " << bucket_instance_id << dendl;
RGWRados::Bucket target(store->getRados(), bucket_info);
RGWRados::Bucket::List list_op(&target);
for (vector<rgw_bucket_dir_entry>::iterator iter = result.begin(); iter != result.end(); ++iter) {
rgw_bucket_dir_entry& entry = *iter;
if (entry.key.instance.empty()) {
- ldout(store->ctx(), 20) << "obj entry: " << entry.key.name << dendl;
+ ldpp_dout(dpp, 20) << "obj entry: " << entry.key.name << dendl;
} else {
- ldout(store->ctx(), 20) << "obj entry: " << entry.key.name << " [" << entry.key.instance << "]" << dendl;
+ ldpp_dout(dpp, 20) << "obj entry: " << entry.key.name << " [" << entry.key.instance << "]" << dendl;
}
- ldout(store->ctx(), 20) << __func__ << ": entry.key.name=" << entry.key.name << " entry.key.instance=" << entry.key.instance << dendl;
+ ldpp_dout(dpp, 20) << __func__ << ": entry.key.name=" << entry.key.name << " entry.key.instance=" << entry.key.instance << dendl;
if (!detailed_mode &&
entry.meta.accounted_size <= (uint64_t)store->ctx()->_conf->rgw_max_chunk_size) {
- ldout(store->ctx(),5) << __func__ << "skipping stat as the object " << entry.key.name
+ ldpp_dout(dpp, 5) << __func__ << "skipping stat as the object " << entry.key.name
<< "fits in a head" << dendl;
continue;
}
map<int, list<string> > oids;
map<int, string>::iterator iter = buckets_instance_index.find(search_stage.shard);
for (; iter != buckets_instance_index.end(); ++iter) {
- ldout(store->ctx(), 0) << "building linked oids index: " << iter->first << "/" << buckets_instance_index.size() << dendl;
+ ldpp_dout(dpp, 0) << "building linked oids index: " << iter->first << "/" << buckets_instance_index.size() << dendl;
bool truncated;
string oid = iter->second;
switch (search_stage.stage) {
case ORPHAN_SEARCH_STAGE_INIT:
- ldout(store->ctx(), 0) << __func__ << "(): initializing state" << dendl;
+ ldpp_dout(dpp, 0) << __func__ << "(): initializing state" << dendl;
search_stage = RGWOrphanSearchStage(ORPHAN_SEARCH_STAGE_LSPOOL);
r = save_state();
if (r < 0) {
}
// fall through
case ORPHAN_SEARCH_STAGE_LSPOOL:
- ldout(store->ctx(), 0) << __func__ << "(): building index of all objects in pool" << dendl;
+ ldpp_dout(dpp, 0) << __func__ << "(): building index of all objects in pool" << dendl;
r = build_all_oids_index();
if (r < 0) {
lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl;
// fall through
case ORPHAN_SEARCH_STAGE_LSBUCKETS:
- ldout(store->ctx(), 0) << __func__ << "(): building index of all bucket indexes" << dendl;
+ ldpp_dout(dpp, 0) << __func__ << "(): building index of all bucket indexes" << dendl;
r = build_buckets_instance_index();
if (r < 0) {
lderr(store->ctx()) << __func__ << ": ERROR: build_all_objs_index returned ret=" << r << dendl;
const std::string& prefix,
const std::set<rgw_obj_key>& entries_filter)
{
- ldout(store->ctx(), 10) << "RGWRadosList::" << __func__ <<
+ ldpp_dout(dpp, 10) << "RGWRadosList::" << __func__ <<
" bucket_instance_id=" << bucket_instance_id <<
", prefix=" << prefix <<
", entries_filter.size=" << entries_filter.size() << dendl;
// probably raced with bucket removal
return 0;
}
- lderr(store->ctx()) << __func__ <<
+ ldpp_dout(dpp, -1) << __func__ <<
": ERROR: RGWRados::get_bucket_instance_info() returned ret=" <<
ret << dendl;
return ret;
rgw_bucket_dir_entry& entry = *iter;
if (entry.key.instance.empty()) {
- ldout(store->ctx(), 20) << "obj entry: " << entry.key.name << dendl;
+ ldpp_dout(dpp, 20) << "obj entry: " << entry.key.name << dendl;
} else {
- ldout(store->ctx(), 20) << "obj entry: " << entry.key.name <<
+ ldpp_dout(dpp, 20) << "obj entry: " << entry.key.name <<
" [" << entry.key.instance << "]" << dendl;
}
- ldout(store->ctx(), 20) << __func__ << ": entry.key.name=" <<
+ ldpp_dout(dpp, 20) << __func__ << ": entry.key.name=" <<
entry.key.name << " entry.key.instance=" << entry.key.instance <<
dendl;
// bucket deletion race?
return 0;
} if (ret < 0) {
- lderr(store->ctx()) << "RGWRadosList::" << __func__ <<
+ ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ <<
": ERROR: process_bucket(); bucket_id=" <<
bucket_id << " returned ret=" << ret << dendl;
}
// bucket deletion race?
return 0;
} else if (ret < 0) {
- lderr(store->ctx()) << "RGWRadosList::" << __func__ <<
+ ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ <<
": ERROR: get_bucket_info returned ret=" << ret << dendl;
return ret;
}
&is_listing_truncated, null_yield);
if (ret == -ENOENT) {
// could bucket have been removed while this is running?
- ldout(store->ctx(), 20) << "RGWRadosList::" << __func__ <<
+ ldpp_dout(dpp, 20) << "RGWRadosList::" << __func__ <<
": WARNING: call to list_objects of multipart namespace got ENOENT; "
"assuming bucket removal race" << dendl;
break;
} else if (ret < 0) {
- lderr(store->ctx()) << "RGWRadosList::" << __func__ <<
+ ldpp_dout(dpp, -1) << "RGWRadosList::" << __func__ <<
": ERROR: list_objects op returned ret=" << ret << dendl;
return ret;
}
}
entry.obj = obj;
uploads.push_back(entry);
- ldout(store->ctx(), 20) << "RGWRadosList::" << __func__ <<
+ ldpp_dout(dpp, 20) << "RGWRadosList::" << __func__ <<
" processing incomplete multipart entry " <<
entry << dendl;
}
rgw_pubsub_sub_config sub_conf;
int ret = get_conf(&sub_conf);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to read sub config: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to read sub config: ret=" << ret << dendl;
return ret;
}
return 0;
}
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to read bucket info for events bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to read bucket info for events bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl;
return ret;
}
ret = list_op.list_objects(dpp, max_events, &objs, nullptr, &list.is_truncated, null_yield);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to list bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to list bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl;
return ret;
}
if (list.is_truncated) {
try {
bl.decode_base64(bl64);
} catch (buffer::error& err) {
- ldout(store->ctx(), 1) << "ERROR: failed to event (not a valid base64)" << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to event (not a valid base64)" << dendl;
continue;
}
EventType event;
try {
decode(event, iter);
} catch (buffer::error& err) {
- ldout(store->ctx(), 1) << "ERROR: failed to decode event" << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to decode event" << dendl;
continue;
};
rgw_pubsub_sub_config sub_conf;
int ret = get_conf(&sub_conf);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to read sub config: ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to read sub config: ret=" << ret << dendl;
return ret;
}
string tenant;
ret = store->getRados()->get_bucket_info(store->svc(), tenant, sub_conf.dest.bucket_name, bucket_info, nullptr, null_yield, nullptr);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to read bucket info for events bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to read bucket info for events bucket: bucket=" << sub_conf.dest.bucket_name << " ret=" << ret << dendl;
return ret;
}
ret = del_op.delete_obj(null_yield, dpp);
if (ret < 0) {
- ldout(store->ctx(), 1) << "ERROR: failed to remove event (obj=" << obj << "): ret=" << ret << dendl;
+ ldpp_dout(dpp, 1) << "ERROR: failed to remove event (obj=" << obj << "): ret=" << ret << dendl;
}
return 0;
}
return bs->bucket_obj.operate(&o, null_yield);
});
if (r < 0) {
- ldout(cct, 0) << "ERROR: " << __func__ << "(): bucket index completion failed, obj=" << c->obj << " r=" << r << dendl;
+ ldpp_dout(this, 0) << "ERROR: " << __func__ << "(): bucket index completion failed, obj=" << c->obj << " r=" << r << dendl;
/* ignoring error, can't do anything about it */
continue;
}
rgw_obj_index_key index_key = entry.key;
rgw_obj_key obj(index_key);
- ldout(cct, 20) << "RGWRados::Bucket::List::" << __func__ <<
+ ldpp_dout(dpp, 20) << "RGWRados::Bucket::List::" << __func__ <<
" considering entry " << entry.key << dendl;
/* note that parse_raw_oid() here will not set the correct
get_obj_bucket_and_oid_loc(loc, oid, locator);
ref.pool.ioctx().locator_set_key(locator);
- ldout(cct, 20) << __func__ << ": key=" << key << " oid=" << oid << " locator=" << locator << dendl;
+ ldpp_dout(dpp, 20) << __func__ << ": key=" << key << " oid=" << oid << " locator=" << locator << dendl;
r = ioctx.stat(oid, NULL, NULL);
if (r != -ENOENT) {
/* cannot find a broken part */
continue;
}
- ldout(cct, 20) << __func__ << ": found bad object part: " << loc << dendl;
+ ldpp_dout(dpp, 20) << __func__ << ": found bad object part: " << loc << dendl;
if (need_fix) {
*need_fix = true;
}
ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
return ret;
}
- ldout(store->ctx(), 20) << " bucket index object: " << bucket_obj.get_raw_obj() << dendl;
+ ldpp_dout(dpp, 20) << " bucket index object: " << bucket_obj.get_raw_obj() << dendl;
return 0;
}
ldout(store->ctx(), 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
return ret;
}
- ldout(store->ctx(), 20) << " bucket index oid: " << bucket_obj.get_raw_obj() << dendl;
+ ldpp_dout(dpp, 20) << " bucket index oid: " << bucket_obj.get_raw_obj() << dendl;
return 0;
}
list_op.params.prefix = obj_prefix;
list_op.params.delim = obj_delim;
- ldout(cct, 20) << "iterating listing for bucket=" << bucket_info.bucket.name
+ ldpp_dout(dpp, 20) << "iterating listing for bucket=" << bucket_info.bucket.name
<< ", obj_prefix=" << obj_prefix
<< ", obj_delim=" << obj_delim
<< dendl;
rgw_obj& obj = target->get_obj();
if (obj.get_oid().empty()) {
- ldout(store->ctx(), 0) << "ERROR: " << __func__ << "(): cannot write object with empty name" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): cannot write object with empty name" << dendl;
return -EIO;
}
(!bucket_info.bucket.bucket_id.empty() &&
ep.bucket.bucket_id != bucket_info.bucket.bucket_id)) {
if (r != -ENOENT) {
- ldout(cct, 0) << "ERROR: read_bucket_entrypoint_info() bucket=" << bucket_info.bucket << " returned error: r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: read_bucket_entrypoint_info() bucket=" << bucket_info.bucket << " returned error: r=" << r << dendl;
/* we have no idea what caused the error, will not try to remove it */
}
/*
for (iter = buckets.begin(); iter != buckets.end(); ++iter) {
rgw_bucket& bucket = *iter;
if (enabled)
- ldout(cct, 20) << "enabling bucket name=" << bucket.name << dendl;
+ ldpp_dout(dpp, 20) << "enabling bucket name=" << bucket.name << dendl;
else
- ldout(cct, 20) << "disabling bucket name=" << bucket.name << dendl;
+ ldpp_dout(dpp, 20) << "disabling bucket name=" << bucket.name << dendl;
RGWBucketInfo info;
map<string, bufferlist> attrs;
return r;
if (!state->is_atomic) {
- ldout(cct, 20) << "state for obj=" << obj << " is not atomic, not deferring gc operation" << dendl;
+ ldpp_dout(dpp, 20) << "state for obj=" << obj << " is not atomic, not deferring gc operation" << dendl;
return -EINVAL;
}
} else if (state->obj_tag.length() > 0) {
tag = state->obj_tag.c_str();
} else {
- ldout(cct, 20) << "state->obj_tag is empty, not deferring gc operation" << dendl;
+ ldpp_dout(dpp, 20) << "state->obj_tag is empty, not deferring gc operation" << dendl;
return -EINVAL;
}
- ldout(cct, 0) << "defer chain tag=" << tag << dendl;
+ ldpp_dout(dpp, 0) << "defer chain tag=" << tag << dendl;
cls_rgw_obj_chain chain;
update_gc_chain(state->obj, *state->manifest, &chain);
unmod.tv_nsec = 0;
}
- ldout(store->ctx(), 10) << "If-UnModified-Since: " << params.unmod_since << " Last-Modified: " << ctime << dendl;
+ ldpp_dout(dpp, 10) << "If-UnModified-Since: " << params.unmod_since << " Last-Modified: " << ctime << dendl;
if (ctime > unmod) {
return -ERR_PRECONDITION_FAILED;
}
auto iter = bl.cbegin();
decode(delete_at, iter);
} catch (buffer::error& err) {
- ldout(store->ctx(), 0) << "ERROR: couldn't decode RGW_ATTR_DELETE_AT" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: couldn't decode RGW_ATTR_DELETE_AT" << dendl;
return -EIO;
}
int ret = target->complete_atomic_modification();
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: complete_atomic_modification returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: complete_atomic_modification returned ret=" << ret << dendl;
}
/* other than that, no need to propagate error */
} else {
bool need_follow_olh = follow_olh && obj.key.instance.empty();
RGWObjState *s = rctx->get_state(obj);
- ldout(cct, 20) << "get_obj_state: rctx=" << (void *)rctx << " obj=" << obj << " state=" << (void *)s << " s->prefetch_data=" << s->prefetch_data << dendl;
+ ldpp_dout(dpp, 20) << "get_obj_state: rctx=" << (void *)rctx << " obj=" << obj << " state=" << (void *)s << " s->prefetch_data=" << s->prefetch_data << dendl;
*state = s;
if (s->has_attrs) {
if (s->is_olh && need_follow_olh) {
s->mtime = entry.mtime;
s->zone_short_id = entry.zone_short_id;
s->pg_ver = entry.pg_ver;
- ldout(cct, 20) << __func__ << "(): found obj in tombstone cache: obj=" << obj
+ ldpp_dout(dpp, 20) << __func__ << "(): found obj in tombstone cache: obj=" << obj
<< " mtime=" << s->mtime << " pgv=" << s->pg_ver << dendl;
} else {
s->mtime = real_time();
decode(info, p);
s->accounted_size = info.orig_size;
} catch (buffer::error&) {
- dout(0) << "ERROR: could not decode compression info for object: " << obj << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not decode compression info for object: " << obj << dendl;
return -EIO;
}
}
if (!compressed)
s->accounted_size = s->size;
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: couldn't decode manifest" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: couldn't decode manifest" << dendl;
return -EIO;
}
- ldout(cct, 10) << "manifest: total_size = " << s->manifest->get_obj_size() << dendl;
+ ldpp_dout(dpp, 10) << "manifest: total_size = " << s->manifest->get_obj_size() << dendl;
if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 20>() && \
s->manifest->has_explicit_objs()) {
RGWObjManifest::obj_iterator mi;
for (mi = s->manifest->obj_begin(); mi != s->manifest->obj_end(); ++mi) {
- ldout(cct, 20) << "manifest: ofs=" << mi.get_ofs() << " loc=" << mi.get_location().get_raw_obj(store) << dendl;
+ ldpp_dout(dpp, 20) << "manifest: ofs=" << mi.get_ofs() << " loc=" << mi.get_location().get_raw_obj(store) << dendl;
}
}
try {
decode(s->pg_ver, pgbl);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: couldn't decode pg ver attr for object " << s->obj << ", non-critical error, ignoring" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: couldn't decode pg ver attr for object " << s->obj << ", non-critical error, ignoring" << dendl;
}
}
}
try {
decode(s->zone_short_id, zbl);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: couldn't decode zone short id attr for object " << s->obj << ", non-critical error, ignoring" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: couldn't decode zone short id attr for object " << s->obj << ", non-critical error, ignoring" << dendl;
}
}
}
if (s->obj_tag.length())
- ldout(cct, 20) << "get_obj_state: setting s->obj_tag to " << s->obj_tag.c_str() << dendl;
+ ldpp_dout(dpp, 20) << "get_obj_state: setting s->obj_tag to " << s->obj_tag.c_str() << dendl;
else
- ldout(cct, 20) << "get_obj_state: s->obj_tag was set empty" << dendl;
+ ldpp_dout(dpp, 20) << "get_obj_state: s->obj_tag was set empty" << dendl;
/* an object might not be olh yet, but could have olh id tag, so we should set it anyway if
* it exist, and not only if is_olh() returns true
(!state->fake_tag);
if (!state->is_atomic) {
- ldout(store->ctx(), 20) << "prepare_atomic_modification: state is not atomic. state=" << (void *)state << dendl;
+ ldpp_dout(dpp, 20) << "prepare_atomic_modification: state is not atomic. state=" << (void *)state << dendl;
if (reset_obj) {
op.create(false);
bufferlist bl;
bl.append(state->write_tag.c_str(), state->write_tag.size() + 1);
- ldout(store->ctx(), 10) << "setting object write_tag=" << state->write_tag << dendl;
+ ldpp_dout(dpp, 10) << "setting object write_tag=" << state->write_tag << dendl;
op.setxattr(RGW_ATTR_ID_TAG, bl);
if (modify_tail) {
obj_expirer->hint_add(ts, bucket.tenant, bucket.name, bucket.bucket_id, obj_key);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: failed to decode " RGW_ATTR_DELETE_AT << " attr" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode " RGW_ATTR_DELETE_AT << " attr" << dendl;
}
}
}
if (acl_bl && acl_bl->length()) {
int ret = store->decode_policy(*acl_bl, &owner);
if (ret < 0) {
- ldout(store->ctx(), 0) << "WARNING: could not decode policy ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: could not decode policy ret=" << ret << dendl;
}
}
ent.meta.owner = owner.get_id().to_str();
if (r < 0) {
return r;
}
- ldout(cct, 20) << "reshard completion identified, new_bucket_id=" << new_bucket_id << dendl;
+ ldpp_dout(dpp, 20) << "reshard completion identified, new_bucket_id=" << new_bucket_id << dendl;
i = 0; /* resharding is finished, make sure we can retry */
obj = *pobj;
if (ret == -ENOENT) {
return fetch_new_bucket_id("get_bucket_resharding_failed", new_bucket_id);
} else if (ret < 0) {
- ldout(cct, 0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
" ERROR: failed to get bucket resharding : " << cpp_strerror(-ret) <<
dendl;
return ret;
new_bucket_id);
}
- ldout(cct, 20) << "NOTICE: reshard still in progress; " <<
+ ldpp_dout(dpp, 20) << "NOTICE: reshard still in progress; " <<
(i < num_retries ? "retrying" : "too many retries") << dendl;
if (i == num_retries) {
RGWBucketReshardLock reshard_lock(this->store, bucket_info, true);
ret = reshard_lock.lock();
if (ret < 0) {
- ldout(cct, 20) << __func__ <<
+ ldpp_dout(dpp, 20) << __func__ <<
" INFO: failed to take reshard lock for bucket " <<
bucket_id << "; expected if resharding underway" << dendl;
} else {
- ldout(cct, 10) << __func__ <<
+ ldpp_dout(dpp, 10) << __func__ <<
" INFO: was able to take reshard lock for bucket " <<
bucket_id << dendl;
ret = RGWBucketReshard::clear_resharding(this->store, bucket_info);
if (ret < 0) {
reshard_lock.unlock();
- ldout(cct, 0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
" ERROR: failed to clear resharding flags for bucket " <<
bucket_id << dendl;
} else {
reshard_lock.unlock();
- ldout(cct, 5) << __func__ <<
+ ldpp_dout(dpp, 5) << __func__ <<
" INFO: apparently successfully cleared resharding flags for "
"bucket " << bucket_id << dendl;
continue; // if we apparently succeed immediately test again
ret = reshard_wait->wait(y);
if (ret < 0) {
- ldout(cct, 0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
" ERROR: bucket is still resharding, please retry" << dendl;
return ret;
}
} // for loop
- ldout(cct, 0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
" ERROR: bucket is still resharding, please retry" << dendl;
return -ERR_BUSY_RESHARDING;
}
return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
});
if (r < 0) {
- ldout(cct, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_olh() returned r=" << r << dendl;
+ ldpp_dout(dpp, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_olh() returned r=" << r << dendl;
return r;
}
return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
});
if (r < 0) {
- ldout(cct, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_instance() returned r=" << r << dendl;
+ ldpp_dout(dpp, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_instance() returned r=" << r << dendl;
return r;
}
return r;
});
if (ret < 0) {
- ldout(cct, 20) << "cls_rgw_get_olh_log() returned r=" << r << dendl;
+ ldpp_dout(dpp, 20) << "cls_rgw_get_olh_log() returned r=" << r << dendl;
return ret;
}
return pbs->bucket_obj.operate(&op, null_yield);
});
if (ret < 0) {
- ldout(cct, 20) << "cls_rgw_trim_olh_log() returned r=" << ret << dendl;
+ ldpp_dout(dpp, 20) << "cls_rgw_trim_olh_log() returned r=" << ret << dendl;
return ret;
}
return rgw_rados_operate(ref.pool.ioctx(), ref.obj.oid, &op, null_yield);
});
if (ret < 0) {
- ldout(cct, 5) << "rgw_rados_operate() after cls_rgw_clear_olh() returned ret=" << ret << dendl;
+ ldpp_dout(dpp, 5) << "rgw_rados_operate() after cls_rgw_clear_olh() returned ret=" << ret << dendl;
return ret;
}
for (; viter != iter->second.end(); ++viter) {
rgw_bucket_olh_log_entry& entry = *viter;
- ldout(cct, 20) << "olh_log_entry: epoch=" << iter->first << " op=" << (int)entry.op
+ ldpp_dout(dpp, 20) << "olh_log_entry: epoch=" << iter->first << " op=" << (int)entry.op
<< " key=" << entry.key.name << "[" << entry.key.instance << "] "
<< (entry.delete_marker ? "(delete)" : "") << dendl;
switch (entry.op) {
// only overwrite a link of the same epoch if its key sorts before
if (link_epoch < iter->first || key.instance.empty() ||
key.instance > entry.key.instance) {
- ldout(cct, 20) << "apply_olh_log applying key=" << entry.key << " epoch=" << iter->first << " delete_marker=" << entry.delete_marker
+ ldpp_dout(dpp, 20) << "apply_olh_log applying key=" << entry.key << " epoch=" << iter->first << " delete_marker=" << entry.delete_marker
<< " over current=" << key << " epoch=" << link_epoch << " delete_marker=" << delete_marker << dendl;
need_to_link = true;
need_to_remove = false;
key = entry.key;
delete_marker = entry.delete_marker;
} else {
- ldout(cct, 20) << "apply_olh skipping key=" << entry.key<< " epoch=" << iter->first << " delete_marker=" << entry.delete_marker
+ ldpp_dout(dpp, 20) << "apply_olh skipping key=" << entry.key<< " epoch=" << iter->first << " delete_marker=" << entry.delete_marker
<< " before current=" << key << " epoch=" << link_epoch << " delete_marker=" << delete_marker << dendl;
}
break;
need_to_link = false;
break;
default:
- ldout(cct, 0) << "ERROR: apply_olh_log: invalid op: " << (int)entry.op << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: apply_olh_log: invalid op: " << (int)entry.op << dendl;
return -EIO;
}
string attr_name = RGW_ATTR_OLH_PENDING_PREFIX;
op_tag, meta, olh_epoch, unmod_since, high_precision_time,
zones_trace, log_data_change);
if (ret < 0) {
- ldout(cct, 20) << "bucket_index_link_olh() target_obj=" << target_obj << " delete_marker=" << (int)delete_marker << " returned " << ret << dendl;
+ ldpp_dout(dpp, 20) << "bucket_index_link_olh() target_obj=" << target_obj << " delete_marker=" << (int)delete_marker << " returned " << ret << dendl;
if (ret == -ECANCELED) {
// the bucket index rejected the link_olh() due to olh tag mismatch;
// attempt to reconstruct olh head attributes based on the bucket index
ret = 0;
}
if (ret < 0) {
- ldout(cct, 20) << "update_olh() target_obj=" << target_obj << " returned " << ret << dendl;
+ ldpp_dout(dpp, 20) << "update_olh() target_obj=" << target_obj << " returned " << ret << dendl;
return ret;
}
return 0;
}
if (ret < 0) {
- ldout(cct, 20) << "update_olh() target_obj=" << target_obj << " returned " << ret << dendl;
+ ldpp_dout(dpp, 20) << "update_olh() target_obj=" << target_obj << " returned " << ret << dendl;
return ret;
}
* few results, perhaps due to filtering or to a series of
* namespaced entries */
- ldout(cct, 10) << "RGWRados::" << __func__ << ": " << bucket_info.bucket <<
+ ldpp_dout(dpp, 10) << "RGWRados::" << __func__ << ": " << bucket_info.bucket <<
" start_after=\"" << start_after.name <<
"[" << start_after.instance <<
"]\", prefix=\"" << prefix <<
num_entries_per_shard = num_entries;
}
- ldout(cct, 10) << "RGWRados::" << __func__ <<
+ ldpp_dout(dpp, 10) << "RGWRados::" << __func__ <<
" request from each of " << shard_count <<
" shard(s) for " << num_entries_per_shard << " entries to get " <<
num_entries << " total entries" << dendl;
const string& name = tracker.entry_name();
rgw_bucket_dir_entry& dirent = tracker.dir_entry();
- ldout(cct, 20) << "RGWRados::" << __func__ << " currently processing " <<
+ ldpp_dout(dpp, 20) << "RGWRados::" << __func__ << " currently processing " <<
dirent.key << " from shard " << tracker.shard_idx << dendl;
const bool force_check =
}
if (r >= 0) {
- ldout(cct, 10) << "RGWRados::" << __func__ << ": got " <<
+ ldpp_dout(dpp, 10) << "RGWRados::" << __func__ << ": got " <<
dirent.key.name << "[" << dirent.key.instance << "]" << dendl;
m[name] = std::move(dirent);
last_entry_visited = &(m[name]);
++count;
} else {
- ldout(cct, 10) << "RGWRados::" << __func__ << ": skipping " <<
+ ldpp_dout(dpp, 10) << "RGWRados::" << __func__ << ": skipping " <<
dirent.key.name << "[" << dirent.key.instance << "]" << dendl;
last_entry_visited = &tracker.dir_entry();
}
}
}
- ldout(cct, 20) << "RGWRados::" << __func__ <<
+ ldpp_dout(dpp, 20) << "RGWRados::" << __func__ <<
": returning, count=" << count << ", is_truncated=" << *is_truncated <<
dendl;
if (*is_truncated && count < num_entries) {
- ldout(cct, 10) << "RGWRados::" << __func__ <<
+ ldpp_dout(dpp, 10) << "RGWRados::" << __func__ <<
": INFO requested " << num_entries << " entries but returning " <<
count << ", which is truncated" << dendl;
}
if (last_entry_visited != nullptr && last_entry) {
*last_entry = last_entry_visited->key;
- ldout(cct, 20) << "RGWRados::" << __func__ <<
+ ldpp_dout(dpp, 20) << "RGWRados::" << __func__ <<
": returning, last_entry=" << *last_entry << dendl;
} else {
- ldout(cct, 20) << "RGWRados::" << __func__ <<
+ ldpp_dout(dpp, 20) << "RGWRados::" << __func__ <<
": returning, last_entry NOT SET" << dendl;
}
rgw_obj_index_key *last_entry,
optional_yield y,
check_filter_t force_check_filter) {
- ldout(cct, 10) << "cls_bucket_list_unordered " << bucket_info.bucket <<
+ ldpp_dout(dpp, 10) << "cls_bucket_list_unordered " << bucket_info.bucket <<
" start_after " << start_after.name << "[" << start_after.instance <<
"] num_entries " << num_entries << dendl;
// at this point either r >=0 or r == -ENOENT
if (r >= 0) { // i.e., if r != -ENOENT
- ldout(cct, 10) << "RGWRados::cls_bucket_list_unordered: got " <<
+ ldpp_dout(dpp, 10) << "RGWRados::cls_bucket_list_unordered: got " <<
dirent.key.name << "[" << dirent.key.instance << "]" << dendl;
if (count < num_entries) {
get_obj_bucket_and_oid_loc(obj, oid, loc);
if (loc != list_state.locator) {
- ldout(cct, 0) << "WARNING: generated locator (" << loc << ") is different from listed locator (" << list_state.locator << ")" << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: generated locator (" << loc << ") is different from listed locator (" << list_state.locator << ")" << dendl;
}
io_ctx.locator_set_key(list_state.locator);
return 0;
}
- ldout(cct, 1) << "RGWRados::" << __func__ << " bucket " << bucket.name <<
+ ldpp_dout(dpp, 1) << "RGWRados::" << __func__ << " bucket " << bucket.name <<
" needs resharding; current num shards " << bucket_info.layout.current_index.layout.normal.num_shards <<
"; new num shards " << final_num_shards << " (suggested " <<
suggested_num_shards << ")" << dendl;
new_num_shards = std::min(new_num_shards, get_max_bucket_shards());
if (new_num_shards <= num_source_shards) {
- ldout(cct, 20) << "not resharding bucket name=" << bucket_info.bucket.name << ", orig_num=" << num_source_shards << ", new_num_shards=" << new_num_shards << dendl;
+ ldpp_dout(dpp, 20) << "not resharding bucket name=" << bucket_info.bucket.name << ", orig_num=" << num_source_shards << ", new_num_shards=" << new_num_shards << dendl;
return 0;
}
bucket_info.reshard_status = s;
int ret = store->getRados()->put_bucket_instance_info(bucket_info, false, real_time(), &bucket_attrs, dpp);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: failed to write bucket info, ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to write bucket info, ret=" << ret << dendl;
return ret;
}
return 0;
list<rgw_cls_bi_entry> entries;
if (max_entries < 0) {
- ldout(store->ctx(), 0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
": can't reshard, negative max_entries" << dendl;
return -EINVAL;
}
int ret = bucket_info_updater.start();
if (ret < 0) {
- ldout(store->ctx(), 0) << __func__ << ": failed to update bucket info ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << __func__ << ": failed to update bucket info ret=" << ret << dendl;
return ret;
}
ret = store->ctl()->bucket->link_bucket(new_bucket_info.owner, new_bucket_info.bucket, bucket_info.creation_time, null_yield, dpp);
if (ret < 0) {
- lderr(store->ctx()) << "failed to link new bucket instance (bucket_id=" << new_bucket_info.bucket.bucket_id << ": " << cpp_strerror(-ret) << ")" << dendl;
+ ldpp_dout(dpp, -1) << "failed to link new bucket instance (bucket_id=" << new_bucket_info.bucket.bucket_id << ": " << cpp_strerror(-ret) << ")" << dendl;
return ret;
}
ret = bucket_info_updater.complete();
if (ret < 0) {
- ldout(store->ctx(), 0) << __func__ << ": failed to update bucket info ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << __func__ << ": failed to update bucket info ret=" << ret << dendl;
/* don't error out, reshard process succeeded */
}
ret = store->ctl()->bucket->remove_bucket_instance_info(bucket_info.bucket,
bucket_info, null_yield, dpp);
if (ret < 0) {
- lderr(store->ctx()) << "Error: " << __func__ <<
+ ldpp_dout(dpp, -1) << "Error: " << __func__ <<
" failed to clean old bucket info object \"" <<
bucket_info.bucket.get_key() <<
"\"created after successful resharding with error " << ret << dendl;
}
- ldout(store->ctx(), 1) << __func__ <<
+ ldpp_dout(dpp, 1) << __func__ <<
" INFO: reshard of bucket \"" << bucket_info.bucket.name << "\" from \"" <<
bucket_info.bucket.get_key() << "\" to \"" <<
new_bucket_info.bucket.get_key() << "\" completed successfully" << dendl;
new_bucket_info,
null_yield, dpp);
if (ret2 < 0) {
- lderr(store->ctx()) << "Error: " << __func__ <<
+ ldpp_dout(dpp, -1) << "Error: " << __func__ <<
" failed to clean bucket info object \"" <<
new_bucket_info.bucket.get_key() <<
"\"created during incomplete resharding with error " << ret2 << dendl;
int ret = logshard_lock.lock();
if (ret < 0) {
- ldout(store->ctx(), 5) << __func__ << "(): failed to acquire lock on " <<
+ ldpp_dout(dpp, 5) << __func__ << "(): failed to acquire lock on " <<
logshard_oid << ", ret = " << ret <<dendl;
return ret;
}
std::list<cls_rgw_reshard_entry> entries;
ret = list(logshard_num, marker, max_entries, entries, &truncated);
if (ret < 0) {
- ldout(cct, 10) << "cannot list all reshards in logshard oid=" <<
+ ldpp_dout(dpp, 10) << "cannot list all reshards in logshard oid=" <<
logshard_oid << dendl;
continue;
}
for(auto& entry: entries) { // logshard entries
if(entry.new_instance_id.empty()) {
- ldout(store->ctx(), 20) << __func__ << " resharding " <<
+ ldpp_dout(dpp, 20) << __func__ << " resharding " <<
entry.bucket_name << dendl;
rgw_bucket bucket;
null_yield, dpp, &attrs);
if (ret < 0 || bucket_info.bucket.bucket_id != entry.bucket_id) {
if (ret < 0) {
- ldout(cct, 0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
": Error in get_bucket_info for bucket " << entry.bucket_name <<
": " << cpp_strerror(-ret) << dendl;
if (ret != -ENOENT) {
return ret;
}
} else {
- ldout(cct,0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
": Bucket: " << entry.bucket_name <<
" already resharded by someone, skipping " << dendl;
}
// we've encountered a reshard queue entry for an apparently
// non-existent bucket; let's try to recover by cleaning up
- ldout(cct, 0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
": removing reshard queue entry for a resharded or non-existent bucket" <<
entry.bucket_name << dendl;
ret = remove(entry);
if (ret < 0) {
- ldout(cct, 0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
": Error removing non-existent bucket " <<
entry.bucket_name << " from resharding queue: " <<
cpp_strerror(-ret) << dendl;
ret = br.execute(entry.new_num_shards, max_entries, dpp, false, nullptr,
nullptr, this);
if (ret < 0) {
- ldout(store->ctx(), 0) << __func__ <<
+ ldpp_dout(dpp, 0) << __func__ <<
": Error during resharding bucket " << entry.bucket_name << ":" <<
cpp_strerror(-ret)<< dendl;
return ret;
}
- ldout(store->ctx(), 20) << __func__ <<
+ ldpp_dout(dpp, 20) << __func__ <<
" removing reshard queue entry for bucket " << entry.bucket_name <<
dendl;
ret = remove(entry);
if (ret < 0) {
- ldout(cct, 0) << __func__ << ": Error removing bucket " <<
+ ldpp_dout(dpp, 0) << __func__ << ": Error removing bucket " <<
entry.bucket_name << " from resharding queue: " <<
cpp_strerror(-ret) << dendl;
return ret;
int RGWReshard::process_all_logshards(const DoutPrefixProvider *dpp)
{
if (!store->svc()->zone->can_reshard()) {
- ldout(store->ctx(), 20) << __func__ << " Resharding is disabled" << dendl;
+ ldpp_dout(dpp, 20) << __func__ << " Resharding is disabled" << dendl;
return 0;
}
int ret = 0;
string logshard;
get_logshard_oid(i, &logshard);
- ldout(store->ctx(), 20) << "processing logshard = " << logshard << dendl;
+ ldpp_dout(dpp, 20) << "processing logshard = " << logshard << dendl;
ret = process_single_logshard(i, dpp);
- ldout(store->ctx(), 20) << "finish processing logshard = " << logshard << " , ret = " << ret << dendl;
+ ldpp_dout(dpp, 20) << "finish processing logshard = " << logshard << " , ret = " << ret << dendl;
}
return 0;
/* Get keys */
op_ret = meta_mgr->get(metadata_key, s->formatter, s->yield, s);
if (op_ret < 0) {
- dout(5) << "ERROR: can't get key: " << cpp_strerror(op_ret) << dendl;
+ ldpp_dout(s, 5) << "ERROR: can't get key: " << cpp_strerror(op_ret) << dendl;
return;
}
op_ret = store->ctl()->meta.mgr->put(metadata_key, bl, s->yield, s, sync_type,
false, &ondisk_version);
if (op_ret < 0) {
- dout(5) << "ERROR: can't put key: " << cpp_strerror(op_ret) << dendl;
+ ldpp_dout(s, 5) << "ERROR: can't put key: " << cpp_strerror(op_ret) << dendl;
return;
}
// translate internal codes into return header
frame_metadata_key(s, metadata_key);
op_ret = store->ctl()->meta.mgr->remove(metadata_key, s->yield, s);
if (op_ret < 0) {
- dout(5) << "ERROR: can't remove key: " << cpp_strerror(op_ret) << dendl;
+ ldpp_dout(s, 5) << "ERROR: can't remove key: " << cpp_strerror(op_ret) << dendl;
return;
}
op_ret = 0;
for (const auto& it : result) {
s->formatter->open_object_section("Arn");
auto& arn = it.get_arn();
- ldout(s->cct, 0) << "ARN: " << arn << dendl;
+ ldpp_dout(s, 0) << "ARN: " << arn << dendl;
s->formatter->dump_string("Arn", arn);
s->formatter->close_section();
}
auto sub = ps->get_sub_with_events(sub_name);
op_ret = sub->remove_event(s, event_id);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to ack event on subscription '" << sub_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to ack event on subscription '" << sub_name << "', ret=" << op_ret << dendl;
return;
}
- ldout(s->cct, 20) << "successfully acked event on subscription '" << sub_name << "'" << dendl;
+ ldpp_dout(this, 20) << "successfully acked event on subscription '" << sub_name << "'" << dendl;
}
void RGWPSPullSubEventsOp::execute(optional_yield y) {
}
op_ret = sub->list_events(s, marker, max_entries);
if (op_ret < 0) {
- ldout(s->cct, 1) << "failed to get events from subscription '" << sub_name << "', ret=" << op_ret << dendl;
+ ldpp_dout(this, 1) << "failed to get events from subscription '" << sub_name << "', ret=" << op_ret << dendl;
return;
}
- ldout(s->cct, 20) << "successfully got events from subscription '" << sub_name << "'" << dendl;
+ ldpp_dout(this, 20) << "successfully got events from subscription '" << sub_name << "'" << dendl;
}
case RGW_OP_GET_OBJ://s3select its post-method(payload contain the query) , the request is get-object
break;
default:
- dout(10) << "ERROR: AWS4 completion for this operation NOT IMPLEMENTED" << dendl;
+ ldpp_dout(s, 10) << "ERROR: AWS4 completion for this operation NOT IMPLEMENTED" << dendl;
throw -ERR_NOT_IMPLEMENTED;
}
/* IMHO "streamed" doesn't fit too good here. I would prefer to call
* it "chunked" but let's be coherent with Amazon's terminology. */
- dout(10) << "body content detected in multiple chunks" << dendl;
+ ldpp_dout(s, 10) << "body content detected in multiple chunks" << dendl;
/* payload in multiple chunks */
case RGW_OP_PUT_OBJ:
break;
default:
- dout(10) << "ERROR: AWS4 completion for this operation NOT IMPLEMENTED (streaming mode)" << dendl;
+ ldpp_dout(s, 10) << "ERROR: AWS4 completion for this operation NOT IMPLEMENTED (streaming mode)" << dendl;
throw -ERR_NOT_IMPLEMENTED;
}
- dout(10) << "aws4 seed signature ok... delaying v4 auth" << dendl;
+ ldpp_dout(s, 10) << "aws4 seed signature ok... delaying v4 auth" << dendl;
/* In the case of streamed payload client sets the x-amz-content-sha256
* to "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" but uses "UNSIGNED-PAYLOAD"
string rArn = s->info.args.get("RoleArn");
const auto& [ret, role] = sts.getRoleInfo(s, rArn, y);
if (ret < 0) {
- ldout(s->cct, 0) << "failed to get role info using role arn: " << rArn << dendl;
+ ldpp_dout(this, 0) << "failed to get role info using role arn: " << rArn << dendl;
return ret;
}
string policy = role.get_assume_role_policy();
RGWUser user;
op_ret = user.init(s, store, op_state, y);
if (op_ret < 0) {
- ldout(store->ctx(), 20) << "failed initializing user info: " << op_ret << dendl;
+ ldpp_dout(this, 20) << "failed initializing user info: " << op_ret << dendl;
return;
}
UserQuotas quotas;
if ((op_ret = rgw_rest_get_json_input(store->ctx(), s, quotas, QUOTA_INPUT_MAX_LEN, NULL)) < 0) {
- ldout(store->ctx(), 20) << "failed to retrieve input" << dendl;
+ ldpp_dout(this, 20) << "failed to retrieve input" << dendl;
return;
}
bool empty;
op_ret = rgw_rest_get_json_input(store->ctx(), s, quota, QUOTA_INPUT_MAX_LEN, &empty);
if (op_ret < 0) {
- ldout(store->ctx(), 20) << "failed to retrieve input" << dendl;
+ ldpp_dout(this, 20) << "failed to retrieve input" << dendl;
if (!empty)
return;
string err_msg;
op_ret = user.info(info, &err_msg);
if (op_ret < 0) {
- ldout(store->ctx(), 20) << "failed to get user info: " << op_ret << dendl;
+ ldpp_dout(this, 20) << "failed to get user info: " << op_ret << dendl;
return;
}
RGWQuotaInfo *old_quota;
string err;
op_ret = user.modify(s, op_state, y, &err);
if (op_ret < 0) {
- ldout(store->ctx(), 20) << "failed updating user info: " << op_ret << ": " << err << dendl;
+ ldpp_dout(this, 20) << "failed updating user info: " << op_ret << ": " << err << dendl;
return;
}
}
map<string, bufferlist> uattrs;
op_ret = store->ctl()->user->get_attrs_by_uid(s, user_id, &uattrs, s->yield);
if (op_ret == -ENOENT) {
- ldout(s->cct, 0) << "ERROR: attrs not found for user" << user_name << dendl;
+ ldpp_dout(this, 0) << "ERROR: attrs not found for user" << user_name << dendl;
op_ret = -ERR_NO_SUCH_ENTITY;
return;
}
policy = policies[policy_name];
dump(s->formatter);
} else {
- ldout(s->cct, 0) << "ERROR: policy not found" << policy << dendl;
+ ldpp_dout(this, 0) << "ERROR: policy not found" << policy << dendl;
op_ret = -ERR_NO_SUCH_ENTITY;
return;
}
} else {
- ldout(s->cct, 0) << "ERROR: RGW_ATTR_USER_POLICY not found" << dendl;
+ ldpp_dout(this, 0) << "ERROR: RGW_ATTR_USER_POLICY not found" << dendl;
op_ret = -ERR_NO_SUCH_ENTITY;
return;
}
map<string, bufferlist> uattrs;
op_ret = store->ctl()->user->get_attrs_by_uid(s, user_id, &uattrs, s->yield);
if (op_ret == -ENOENT) {
- ldout(s->cct, 0) << "ERROR: attrs not found for user" << user_name << dendl;
+ ldpp_dout(this, 0) << "ERROR: attrs not found for user" << user_name << dendl;
op_ret = -ERR_NO_SUCH_ENTITY;
return;
}
s->formatter->close_section();
s->formatter->close_section();
} else {
- ldout(s->cct, 0) << "ERROR: RGW_ATTR_USER_POLICY not found" << dendl;
+ ldpp_dout(this, 0) << "ERROR: RGW_ATTR_USER_POLICY not found" << dendl;
op_ret = -ERR_NO_SUCH_ENTITY;
return;
}
/* check to see the name is not used */
ret = read_id(dpp, name, tenant, id, y);
if (exclusive && ret == 0) {
- ldout(cct, 0) << "ERROR: name " << name << " already in use for role id "
+ ldpp_dout(dpp, 0) << "ERROR: name " << name << " already in use for role id "
<< id << dendl;
return -EEXIST;
} else if ( ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "failed reading role id " << id << ": "
+ ldpp_dout(dpp, 0) << "failed reading role id " << id << ": "
<< cpp_strerror(-ret) << dendl;
return ret;
}
auto& pool = svc->zone->get_zone_params().roles_pool;
ret = store_info(exclusive, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: storing role info in pool: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: storing role info in pool: " << pool.name << ": "
<< id << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
ret = store_name(exclusive, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: storing role name in pool: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: storing role name in pool: " << pool.name << ": "
<< name << ": " << cpp_strerror(-ret) << dendl;
//Delete the role info that was stored in the previous call
string oid = get_info_oid_prefix() + id;
int info_ret = rgw_delete_system_obj(svc->sysobj, pool, oid, NULL, y);
if (info_ret < 0) {
- ldout(cct, 0) << "ERROR: cleanup of role id from pool: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: cleanup of role id from pool: " << pool.name << ": "
<< id << ": " << cpp_strerror(-info_ret) << dendl;
}
return ret;
ret = store_path(exclusive, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: storing role path in pool: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: storing role path in pool: " << pool.name << ": "
<< path << ": " << cpp_strerror(-ret) << dendl;
//Delete the role info that was stored in the previous call
string oid = get_info_oid_prefix() + id;
int info_ret = rgw_delete_system_obj(svc->sysobj, pool, oid, NULL, y);
if (info_ret < 0) {
- ldout(cct, 0) << "ERROR: cleanup of role id from pool: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: cleanup of role id from pool: " << pool.name << ": "
<< id << ": " << cpp_strerror(-info_ret) << dendl;
}
//Delete role name that was stored in previous call
oid = tenant + get_names_oid_prefix() + name;
int name_ret = rgw_delete_system_obj(svc->sysobj, pool, oid, NULL, y);
if (name_ret < 0) {
- ldout(cct, 0) << "ERROR: cleanup of role name from pool: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: cleanup of role name from pool: " << pool.name << ": "
<< name << ": " << cpp_strerror(-name_ret) << dendl;
}
return ret;
string oid = get_info_oid_prefix() + id;
ret = rgw_delete_system_obj(svc->sysobj, pool, oid, NULL, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: deleting role id from pool: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: deleting role id from pool: " << pool.name << ": "
<< id << ": " << cpp_strerror(-ret) << dendl;
}
oid = tenant + get_names_oid_prefix() + name;
ret = rgw_delete_system_obj(svc->sysobj, pool, oid, NULL, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: deleting role name from pool: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: deleting role name from pool: " << pool.name << ": "
<< name << ": " << cpp_strerror(-ret) << dendl;
}
oid = tenant + get_path_oid_prefix() + path + get_info_oid_prefix() + id;
ret = rgw_delete_system_obj(svc->sysobj, pool, oid, NULL, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: deleting role path from pool: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: deleting role path from pool: " << pool.name << ": "
<< path << ": " << cpp_strerror(-ret) << dendl;
}
return ret;
using ceph::decode;
decode(nameToId, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: failed to decode role from pool: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode role from pool: " << pool.name << ": "
<< role_name << dendl;
return -EIO;
}
int ret = rgw_get_system_obj(obj_ctx, pool, oid, bl, NULL, NULL, y, dpp);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed reading role info from pool: " << pool.name <<
+ ldpp_dout(dpp, 0) << "ERROR: failed reading role info from pool: " << pool.name <<
": " << id << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
auto iter = bl.cbegin();
decode(*this, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: failed to decode role info from pool: " << pool.name <<
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode role info from pool: " << pool.name <<
": " << id << dendl;
return -EIO;
}
int ret = rgw_get_system_obj(obj_ctx, pool, oid, bl, NULL, NULL, y, dpp);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: failed reading role name from pool: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: failed reading role name from pool: " << pool.name << ": "
<< name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
auto iter = bl.cbegin();
decode(nameToId, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: failed to decode role name from pool: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode role name from pool: " << pool.name << ": "
<< name << dendl;
return -EIO;
}
list<string> oids;
int r = store->list_raw_objects(pool, prefix, 1000, ctx, oids, &is_truncated);
if (r < 0) {
- ldout(cct, 0) << "ERROR: listing filtered objects failed: " << pool.name << ": "
+ ldpp_dout(dpp, 0) << "ERROR: listing filtered objects failed: " << pool.name << ": "
<< prefix << ": " << cpp_strerror(-r) << dendl;
return r;
}
return ret;
if (!results.objs.empty() && !delete_children) {
- lderr(store->ctx()) << "ERROR: could not remove non-empty bucket " << info.bucket.name <<
+ ldpp_dout(dpp, -1) << "ERROR: could not remove non-empty bucket " << info.bucket.name <<
dendl;
return -ENOTEMPTY;
}
// remain is detrius from a prior bug
ret = store->getRados()->delete_bucket(info, ot, y, dpp, !delete_children);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: could not remove bucket " <<
+ ldpp_dout(dpp, -1) << "ERROR: could not remove bucket " <<
info.bucket.name << dendl;
return ret;
}
ret = store->ctl()->bucket->unlink_bucket(info.owner, info.bucket, y, dpp, false);
if (ret < 0) {
- lderr(store->ctx()) << "ERROR: unable to remove user bucket information" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: unable to remove user bucket information" << dendl;
}
if (forward_to_master) {
int r = finisher->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start finisher service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start finisher service (" << cpp_strerror(-r) << dendl;
return r;
}
if (!raw) {
r = notify->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start notify service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start notify service (" << cpp_strerror(-r) << dendl;
return r;
}
}
r = rados->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start rados service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start rados service (" << cpp_strerror(-r) << dendl;
return r;
}
if (!raw) {
r = zone->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start zone service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start zone service (" << cpp_strerror(-r) << dendl;
return r;
}
r = mdlog->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start mdlog service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start mdlog service (" << cpp_strerror(-r) << dendl;
return r;
}
r = sync_modules->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start sync modules service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start sync modules service (" << cpp_strerror(-r) << dendl;
return r;
}
}
r = cls->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start cls service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start cls service (" << cpp_strerror(-r) << dendl;
return r;
}
r = config_key_rados->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start config_key service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start config_key service (" << cpp_strerror(-r) << dendl;
return r;
}
r = zone_utils->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start zone_utils service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start zone_utils service (" << cpp_strerror(-r) << dendl;
return r;
}
r = quota->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start quota service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start quota service (" << cpp_strerror(-r) << dendl;
return r;
}
r = sysobj_core->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start sysobj_core service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start sysobj_core service (" << cpp_strerror(-r) << dendl;
return r;
}
if (have_cache) {
r = sysobj_cache->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start sysobj_cache service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start sysobj_cache service (" << cpp_strerror(-r) << dendl;
return r;
}
}
r = sysobj->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start sysobj service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start sysobj service (" << cpp_strerror(-r) << dendl;
return r;
}
if (!raw) {
r = meta_be_sobj->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start meta_be_sobj service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start meta_be_sobj service (" << cpp_strerror(-r) << dendl;
return r;
}
r = meta->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start meta service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start meta service (" << cpp_strerror(-r) << dendl;
return r;
}
r = bucket_sobj->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start bucket service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start bucket service (" << cpp_strerror(-r) << dendl;
return r;
}
r = bucket_sync_sobj->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start bucket_sync service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start bucket_sync service (" << cpp_strerror(-r) << dendl;
return r;
}
r = user_rados->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start user_rados service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start user_rados service (" << cpp_strerror(-r) << dendl;
return r;
}
r = otp->start(y, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start otp service (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start otp service (" << cpp_strerror(-r) << dendl;
return r;
}
}
int r = _ctl.init(*svc, dpp);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to start init ctls (" << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to start init ctls (" << cpp_strerror(-r) << dendl;
return r;
}
RGWRole role(cct, store->getRados()->pctl, roleName, r_arn->account);
if (int ret = role.get(dpp, y); ret < 0) {
if (ret == -ENOENT) {
- ldout(cct, 0) << "Role doesn't exist: " << roleName << dendl;
+ ldpp_dout(dpp, 0) << "Role doesn't exist: " << roleName << dendl;
ret = -ERR_NO_ROLE_FOUND;
}
return make_tuple(ret, this->role);
}
string r_path = role.get_path();
if (path != r_path) {
- ldout(cct, 0) << "Invalid Role ARN: Path in ARN does not match with the role path: " << path << " " << r_path << dendl;
+ ldpp_dout(dpp, 0) << "Invalid Role ARN: Path in ARN does not match with the role path: " << path << " " << r_path << dendl;
return make_tuple(-EACCES, this->role);
}
this->role = std::move(role);
return make_tuple(0, this->role);
}
} else {
- ldout(cct, 0) << "Invalid role arn: " << arn << dendl;
+ ldpp_dout(dpp, 0) << "Invalid role arn: " << arn << dendl;
return make_tuple(-EINVAL, this->role);
}
}
//Get the role info which is being assumed
boost::optional<rgw::ARN> r_arn = rgw::ARN::parse(req.getRoleARN());
if (r_arn == boost::none) {
- ldout(cct, 0) << "Error in parsing role arn: " << req.getRoleARN() << dendl;
+ ldpp_dout(dpp, 0) << "Error in parsing role arn: " << req.getRoleARN() << dendl;
response.retCode = -EINVAL;
return response;
}
int _send_request() override {
int ret = store->ctl()->meta.mgr->put(raw_key, bl, null_yield, dpp, RGWMDLogSyncType::APPLY_ALWAYS, true);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: can't store key: " << raw_key << " ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: can't store key: " << raw_key << " ret=" << ret << dendl;
return ret;
}
return 0;
int _send_request() override {
int ret = store->ctl()->meta.mgr->remove(raw_key, null_yield, dpp);
if (ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: can't remove key: " << raw_key << " ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: can't remove key: " << raw_key << " ret=" << ret << dendl;
return ret;
}
return 0;
dpp));
if (retcode < 0) {
if (retcode != -ENOENT) {
- ldout(cct, 0) << "ERROR: failed to fetch policy handler for bucket=" << bucket << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to fetch policy handler for bucket=" << bucket << dendl;
}
return set_cr_error(retcode);
if (!last_cold_marker.empty() && status.marker != last_cold_marker) {
set_status("writing updated trim status");
status.marker = std::move(last_cold_marker);
- ldout(cct, 20) << "writing bucket trim marker=" << status.marker << dendl;
+ ldpp_dout(dpp, 20) << "writing bucket trim marker=" << status.marker << dendl;
using WriteStatus = RGWSimpleRadosWriteCR<BucketTrimStatus>;
yield call(new WriteStatus(store->svc()->rados->get_async_processor(), store->svc()->sysobj, obj,
status, &objv));
if (retcode < 0) {
- ldout(cct, 4) << "failed to write updated trim status: "
+ ldpp_dout(dpp, 4) << "failed to write updated trim status: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
do {
ret = user.list_buckets(dpp, marker, string(), max_entries, false, user_buckets, y);
if (ret < 0) {
- ldout(cct, 0) << "failed to read user buckets: ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "failed to read user buckets: ret=" << ret << dendl;
return ret;
}
auto& buckets = user_buckets.get_buckets();
ret = bucket->get_bucket_info(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: could not read bucket info: bucket=" << bucket << " ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not read bucket info: bucket=" << bucket << " ret=" << ret << dendl;
continue;
}
ret = bucket->sync_user_stats(y);
}
ret = bucket->check_bucket_shards(dpp);
if (ret < 0) {
- ldout(cct, 0) << "ERROR in check_bucket_shards: " << cpp_strerror(-ret)<< dendl;
+ ldpp_dout(dpp, 0) << "ERROR in check_bucket_shards: " << cpp_strerror(-ret)<< dendl;
}
}
} while (user_buckets.is_truncated());
ret = rgw_read_user_buckets(dpp, store, user_id, buckets, marker,
string(), max_entries, false, y);
if (ret < 0) {
- ldout(cct, 0) << "failed to read user buckets: ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "failed to read user buckets: ret=" << ret << dendl;
return ret;
}
auto& m = buckets.get_buckets();
auto& bucket_ent = i.second;
ret = bucket_ent->read_bucket_stats(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: could not get bucket stats: ret=" << ret << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not get bucket stats: ret=" << ret << dendl;
return ret;
}
cls_user_bucket_entry entry;
}
user_info.user_email = op_email;
} else if (op_email.empty() && op_state.user_email_specified) {
- ldout(store->ctx(), 10) << "removing email index: " << user_info.user_email << dendl;
+ ldpp_dout(dpp, 10) << "removing email index: " << user_info.user_email << dendl;
/* will be physically removed later when calling update() */
user_info.user_email.clear();
}
map<string, RGWBucketEnt>& m = buckets->get_buckets();
ret = ctl.bucket->read_buckets_stats(m, y, dpp);
if (ret < 0 && ret != -ENOENT) {
- ldout(svc.user->ctx(), 0) << "ERROR: could not get stats for buckets" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not get stats for buckets" << dendl;
return ret;
}
}
/* check to see the name is not used */
ret = read_id(name, id, y);
if (exclusive && ret == 0) {
- ldout(cct, 10) << "ERROR: name " << name << " already in use for obj id " << id << dendl;
+ ldpp_dout(dpp, 10) << "ERROR: name " << name << " already in use for obj id " << id << dendl;
return -EEXIST;
} else if ( ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "failed reading obj id " << id << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "failed reading obj id " << id << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
ret = store_info(exclusive, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
{
int ret = RGWSystemMetaObj::create(dpp, y, exclusive);
if (ret < 0) {
- ldout(cct, 0) << "ERROR creating new realm object " << name << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR creating new realm object " << name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
// create the control object for watch/notify
ret = create_control(exclusive, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR creating control for new realm " << name << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR creating control for new realm " << name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
RGWPeriod period;
}
ret = period.create(dpp, y, true);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: creating new period for realm " << name << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: creating new period for realm " << name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
} else {
ret = store_info(exclusive, y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
ret = set_latest_epoch(y, epoch);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: setting latest epoch " << id << ": " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: setting latest epoch " << id << ": " << cpp_strerror(-ret) << dendl;
}
return ret;
bool force_if_stale)
{
auto zone_svc = sysobj_svc->get_zone_svc();
- ldout(cct, 20) << __func__ << " realm " << realm.get_id() << " period " << current_period.get_id() << dendl;
+ ldpp_dout(dpp, 20) << __func__ << " realm " << realm.get_id() << " period " << current_period.get_id() << dendl;
// gateway must be in the master zone to commit
if (master_zone != zone_svc->get_zone_params().get_id()) {
error_stream << "Cannot commit period on zone "
// store the current metadata sync status in the period
int r = update_sync_status(store, current_period, error_stream, force_if_stale);
if (r < 0) {
- ldout(cct, 0) << "failed to update metadata sync status: "
+ ldpp_dout(dpp, 0) << "failed to update metadata sync status: "
<< cpp_strerror(-r) << dendl;
return r;
}
// create an object with a new period id
r = create(dpp, y, true);
if (r < 0) {
- ldout(cct, 0) << "failed to create new period: " << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "failed to create new period: " << cpp_strerror(-r) << dendl;
return r;
}
// set as current period
r = realm.set_current_period(*this, y);
if (r < 0) {
- ldout(cct, 0) << "failed to update realm's current period: "
+ ldpp_dout(dpp, 0) << "failed to update realm's current period: "
<< cpp_strerror(-r) << dendl;
return r;
}
- ldout(cct, 4) << "Promoted to master zone and committed new period "
+ ldpp_dout(dpp, 4) << "Promoted to master zone and committed new period "
<< id << dendl;
realm.notify_new_period(*this, y);
return 0;
// write the period to rados
int r = store_info(false, y);
if (r < 0) {
- ldout(cct, 0) << "failed to store period: " << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "failed to store period: " << cpp_strerror(-r) << dendl;
return r;
}
// set as latest epoch
return 0;
}
if (r < 0) {
- ldout(cct, 0) << "failed to set latest epoch: " << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "failed to set latest epoch: " << cpp_strerror(-r) << dendl;
return r;
}
r = reflect(y);
if (r < 0) {
- ldout(cct, 0) << "failed to update local objects: " << cpp_strerror(-r) << dendl;
+ ldpp_dout(dpp, 0) << "failed to update local objects: " << cpp_strerror(-r) << dendl;
return r;
}
- ldout(cct, 4) << "Committed new epoch " << epoch
+ ldpp_dout(dpp, 4) << "Committed new epoch " << epoch
<< " for period " << id << dendl;
realm.notify_new_period(*this, y);
return 0;
auto sysobj = sysobj_svc->get_obj(obj_ctx, obj);
int r = sysobj.rop().stat(y, dpp);
if (r < 0) {
- ldout(cct, 10) << "couldn't find old data placement pools config, setting up new ones for the zone" << dendl;
+ ldpp_dout(dpp, 10) << "couldn't find old data placement pools config, setting up new ones for the zone" << dendl;
/* a new system, let's set new placement info */
RGWZonePlacementInfo default_placement;
default_placement.index_pool = name + "." + default_bucket_index_pool_suffix;
r = fix_pool_names(y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: fix_pool_names returned r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: fix_pool_names returned r=" << r << dendl;
return r;
}
// so we don't override an existing default
r = set_as_default(y, true);
if (r < 0 && r != -EEXIST) {
- ldout(cct, 10) << "WARNING: failed to set zone as default, r=" << r << dendl;
+ ldpp_dout(dpp, 10) << "WARNING: failed to set zone as default, r=" << r << dendl;
}
return 0;
for (int i = 0; i < shards_num; ++i, ++shard_id) {
ret = svc.datalog_rados->add_entry(dpp, info, shard_id);
if (ret < 0) {
- lderr(cct) << "ERROR: failed writing data log (info.bucket=" << info.bucket << ", shard_id=" << shard_id << ")" << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: failed writing data log (info.bucket=" << info.bucket << ", shard_id=" << shard_id << ")" << dendl;
return ret;
}
}
try {
decode(*entry_point, iter);
} catch (buffer::error& err) {
- ldout(cct, 0) << "ERROR: could not decode buffer info, caught buffer::error" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not decode buffer info, caught buffer::error" << dendl;
return -EIO;
}
return 0;
if (auto e = binfo_cache->find(cache_key)) {
if (refresh_version &&
e->info.objv_tracker.read_version.compare(&(*refresh_version))) {
- lderr(cct) << "WARNING: The bucket info cache is inconsistent. This is "
+ ldpp_dout(dpp, -1) << "WARNING: The bucket info cache is inconsistent. This is "
<< "a failure that should be debugged. I am a nice machine, "
<< "so I will try to recover." << dendl;
binfo_cache->invalidate(key);
if (ret < 0) {
if (ret != -ENOENT) {
- lderr(cct) << "ERROR: do_read_bucket_instance_info failed: " << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: do_read_bucket_instance_info failed: " << ret << dendl;
} else {
ldpp_dout(dpp, 20) << "do_read_bucket_instance_info, bucket instance not found (key=" << key << ")" << dendl;
}
if (refresh_version &&
refresh_version->compare(&info->objv_tracker.read_version)) {
- lderr(cct) << "WARNING: The OSD has the same version I have. Something may "
+ ldpp_dout(dpp, -1) << "WARNING: The OSD has the same version I have. Something may "
<< "have gone squirrelly. An administrator may have forced a "
<< "change; otherwise there is a problem somewhere." << dendl;
}
&cache_info, refresh_version);
*info = e.info;
if (ret < 0) {
- lderr(cct) << "ERROR: read_bucket_instance_from_oid failed: " << ret << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: read_bucket_instance_from_oid failed: " << ret << dendl;
info->bucket = bucket;
// XXX and why return anything in case of an error anyway?
return ret;
if (refresh_version &&
refresh_version->compare(&info->objv_tracker.read_version)) {
- lderr(cct) << "WARNING: The OSD has the same version I have. Something may "
+ ldpp_dout(dpp, -1) << "WARNING: The OSD has the same version I have. Something may "
<< "have gone squirrelly. An administrator may have forced a "
<< "change; otherwise there is a problem somewhere." << dendl;
}
int r = svc.bucket_sync->handle_bi_removal(info, y);
if (r < 0) {
- ldout(cct, 0) << "ERROR: failed to update bucket instance sync index: r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to update bucket instance sync index: r=" << r << dendl;
/* returning success as index is just keeping hints, so will keep extra hints,
* but bucket removal succeeded
*/
&cache_info);
if (r < 0) {
if (r != -ENOENT) {
- ldout(cct, 0) << "ERROR: svc.bucket->read_bucket_instance_info(key=" << bucket_key << ") returned r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: svc.bucket->read_bucket_instance_info(key=" << bucket_key << ") returned r=" << r << dendl;
}
return r;
}
auto zone_policy_handler = svc.zone->get_sync_policy_handler(zone);
if (!zone_policy_handler) {
- ldout(cct, 20) << "ERROR: could not find policy handler for zone=" << zone << dendl;
+ ldpp_dout(dpp, 20) << "ERROR: could not find policy handler for zone=" << zone << dendl;
return -ENOENT;
}
r = e.handler->init(y);
if (r < 0) {
- ldout(cct, 20) << "ERROR: failed to init bucket sync policy handler: r=" << r << dendl;
+ ldpp_dout(dpp, 20) << "ERROR: failed to init bucket sync policy handler: r=" << r << dendl;
return r;
}
zone_policy_handler,
temp_map, y, dpp);
if (r < 0) {
- ldout(cct, 20) << "ERROR: failed to resolve policy hints: bucket_key=" << bucket_key << ", r=" << r << dendl;
+ ldpp_dout(dpp, 20) << "ERROR: failed to resolve policy hints: bucket_key=" << bucket_key << ", r=" << r << dendl;
return r;
}
if (!sync_policy_cache->put(svc.cache, cache_key, &e, {&cache_info})) {
- ldout(cct, 20) << "couldn't put bucket_sync_policy cache entry, might have raced with data changes" << dendl;
+ ldpp_dout(dpp, 20) << "couldn't put bucket_sync_policy cache entry, might have raced with data changes" << dendl;
}
*handler = e.handler;
auto sysobj = obj_ctx.get_obj(obj);
ret = sysobj.wop().remove(y);
if (ret < 0) {
- ldout(cct, 0) << "ERROR: meta history is empty, but cannot remove it (" << cpp_strerror(-ret) << ")" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: meta history is empty, but cannot remove it (" << cpp_strerror(-ret) << ")" << dendl;
return ret;
}
return -ENOENT;
auto p = bl.cbegin();
state->decode(p);
} catch (buffer::error& e) {
- ldout(cct, 1) << "failed to decode the mdlog history: "
+ ldpp_dout(dpp, 1) << "failed to decode the mdlog history: "
<< e.what() << dendl;
return -EIO;
}
return ret;
}
- ldout(cct, 20) << "started sync module instance, tier type = " << zone_public_config.tier_type << dendl;
+ ldpp_dout(dpp, 20) << "started sync module instance, tier type = " << zone_public_config.tier_type << dendl;
return 0;
}
int r = svc.meta->create_be_handler(RGWSI_MetaBackend::Type::MDBE_SOBJ, &be_handler);
if (r < 0) {
- ldout(ctx(), 0) << "ERROR: failed to create be handler: r=" << r << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to create be handler: r=" << r << dendl;
return r;
}
const DoutPrefixProvider *dpp)
{
if(user.id == RGW_USER_ANON_ID) {
- ldout(svc.meta_be->ctx(), 20) << "RGWSI_User_RADOS::read_user_info(): anonymous user" << dendl;
+ ldpp_dout(dpp, 20) << "RGWSI_User_RADOS::read_user_info(): anonymous user" << dendl;
return -ENOENT;
}
bufferlist bl;
try {
decode(user_id, iter);
if (user_id.user_id != user) {
- lderr(svc.meta_be->ctx()) << "ERROR: rgw_get_user_info_by_uid(): user id mismatch: " << user_id.user_id << " != " << user << dendl;
+ ldpp_dout(dpp, -1) << "ERROR: rgw_get_user_info_by_uid(): user id mismatch: " << user_id.user_id << " != " << user << dendl;
return -EIO;
}
if (!iter.end()) {
decode(*info, iter);
}
} catch (buffer::error& err) {
- ldout(svc.meta_be->ctx(), 0) << "ERROR: failed to decode user info, caught buffer::error" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode user info, caught buffer::error" << dendl;
return -EIO;
}
int r = svc.user->get_user_info_by_swift(ctx, k.id, &inf, nullptr, nullptr, y, dpp);
if (r >= 0 && inf.user_id != info.user_id &&
(!old_info || inf.user_id != old_info->user_id)) {
- ldout(svc.meta_be->ctx(), 0) << "WARNING: can't store user info, swift id (" << k.id
+ ldpp_dout(dpp, 0) << "WARNING: can't store user info, swift id (" << k.id
<< ") already mapped to another user (" << info.user_id << ")" << dendl;
return -EEXIST;
}
int r = svc.user->get_user_info_by_access_key(ctx, k.id, &inf, nullptr, nullptr, y, dpp);
if (r >= 0 && inf.user_id != info.user_id &&
(!old_info || inf.user_id != old_info->user_id)) {
- ldout(svc.meta_be->ctx(), 0) << "WARNING: can't store user info, access key already mapped to another user" << dendl;
+ ldpp_dout(dpp, 0) << "WARNING: can't store user info, access key already mapped to another user" << dendl;
return -EEXIST;
}
}
if (!old_info.user_id.empty() &&
old_info.user_id != new_info.user_id) {
if (old_info.user_id.tenant != new_info.user_id.tenant) {
- ldout(svc.user->ctx(), 0) << "ERROR: tenant mismatch: " << old_info.user_id.tenant << " != " << new_info.user_id.tenant << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: tenant mismatch: " << old_info.user_id.tenant << " != " << new_info.user_id.tenant << dendl;
return -EINVAL;
}
ret = svc.user->remove_uid_index(ctx, old_info, nullptr, y, dpp);
auto kiter = info.access_keys.begin();
for (; kiter != info.access_keys.end(); ++kiter) {
- ldout(cct, 10) << "removing key index: " << kiter->first << dendl;
+ ldpp_dout(dpp, 10) << "removing key index: " << kiter->first << dendl;
ret = remove_key_index(_ctx, kiter->second, y);
if (ret < 0 && ret != -ENOENT) {
ldout(cct, 0) << "ERROR: could not remove " << kiter->first << " (access key object), should be fixed (err=" << ret << ")" << dendl;
auto siter = info.swift_keys.begin();
for (; siter != info.swift_keys.end(); ++siter) {
auto& k = siter->second;
- ldout(cct, 10) << "removing swift subuser index: " << k.id << dendl;
+ ldpp_dout(dpp, 10) << "removing swift subuser index: " << k.id << dendl;
/* check if swift mapping exists */
ret = remove_swift_name_index(_ctx, k.id, y);
if (ret < 0 && ret != -ENOENT) {
}
}
- ldout(cct, 10) << "removing email index: " << info.user_email << dendl;
+ ldpp_dout(dpp, 10) << "removing email index: " << info.user_email << dendl;
ret = remove_email_index(_ctx, info.user_email, y);
if (ret < 0 && ret != -ENOENT) {
ldout(cct, 0) << "ERROR: could not remove email index object for "
}
rgw_raw_obj uid_bucks = get_buckets_obj(info.user_id);
- ldout(cct, 10) << "removing user buckets index" << dendl;
+ ldpp_dout(dpp, 10) << "removing user buckets index" << dendl;
RGWSI_MetaBackend_SObj::Context_SObj *ctx = static_cast<RGWSI_MetaBackend_SObj::Context_SObj *>(_ctx);
auto sysobj = ctx->obj_ctx->get_obj(uid_bucks);
ret = sysobj.wop().remove(y);
int RGWSI_User_RADOS::remove_uid_index(RGWSI_MetaBackend::Context *ctx, const RGWUserInfo& user_info, RGWObjVersionTracker *objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp)
{
- ldout(cct, 10) << "removing user index: " << user_info.user_id << dendl;
+ ldpp_dout(dpp, 10) << "removing user index: " << user_info.user_id << dendl;
RGWSI_MBSObj_RemoveParams params;
int ret = svc.meta_be->remove(ctx, get_meta_key(user_info.user_id), params, objv_tracker, y, dpp);
string key;
user_info.user_id.to_str(key);
rgw_raw_obj uid_obj(svc.zone->get_zone_params().user_uid_pool, key);
- ldout(cct, 0) << "ERROR: could not remove " << user_info.user_id << ":" << uid_obj << ", should be fixed (err=" << ret << ")" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: could not remove " << user_info.user_id << ":" << uid_obj << ", should be fixed (err=" << ret << ")" << dendl;
return ret;
}
return ret;
}
} catch (buffer::error& err) {
- ldout(svc.meta_be->ctx(), 0) << "ERROR: failed to decode user info, caught buffer::error" << dendl;
+ ldpp_dout(dpp, 0) << "ERROR: failed to decode user info, caught buffer::error" << dendl;
return -EIO;
}
ldpp_dout(dpp, 0) << "failed reading realm info: ret "<< ret << " " << cpp_strerror(-ret) << dendl;
return ret;
} else if (ret != -ENOENT) {
- ldout(cct, 20) << "realm " << realm->get_name() << " " << realm->get_id() << dendl;
+ ldpp_dout(dpp, 20) << "realm " << realm->get_name() << " " << realm->get_id() << dendl;
ret = current_period->init(cct, sysobj_svc, realm->get_id(), y,
realm->get_name());
if (ret < 0 && ret != -ENOENT) {
- ldout(cct, 0) << "failed reading current period info: " << " " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << "failed reading current period info: " << " " << cpp_strerror(-ret) << dendl;
return ret;
}
- ldout(cct, 20) << "current period " << current_period->get_id() << dendl;
+ ldpp_dout(dpp, 20) << "current period " << current_period->get_id() << dendl;
}
ret = replace_region_with_zonegroup(dpp, y);
if (ret < 0) {
- lderr(cct) << "failed converting region to zonegroup : ret "<< ret << " " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, -1) << "failed converting region to zonegroup : ret "<< ret << " " << cpp_strerror(-ret) << dendl;
return ret;
}
ret = convert_regionmap(dpp, y);
if (ret < 0) {
- lderr(cct) << "failed converting regionmap: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, -1) << "failed converting regionmap: " << cpp_strerror(-ret) << dendl;
return ret;
}
}
ret = new_realm.create(dpp, y);
if (ret < 0 && ret != -EEXIST) {
- ldout(cct, 0) << __func__ << " Error creating new realm: " << cpp_strerror(-ret) << dendl;
+ ldpp_dout(dpp, 0) << __func__ << " Error creating new realm: " << cpp_strerror(-ret) << dendl;
return ret;
}
ret = new_realm.set_as_default(y);
ldout(cct, 10) << "Creating default zonegroup " << dendl;
ret = zonegroup->create_default(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret)
+ ldpp_dout(dpp, 0) << "failure in zonegroup create_default: ret "<< ret << " " << cpp_strerror(-ret)
<< dendl;
return ret;
}
return ret;
}
}
- ldout(cct, 20) << "zonegroup " << zonegroup->get_name() << dendl;
+ ldpp_dout(dpp, 20) << "zonegroup " << zonegroup->get_name() << dendl;
if (zonegroup->is_master_zonegroup()) {
// use endpoints from the zonegroup's master zone
auto master = zonegroup->zones.find(zonegroup->master_zone);
} else if (ret == -ENOENT) {
ret = zonegroup.create(dpp, y);
if (ret < 0) {
- ldout(cct, 0) << "Error could not create " << zonegroup.get_name() << ": " <<
+ ldpp_dout(dpp, 0) << "Error could not create " << zonegroup.get_name() << ": " <<
cpp_strerror(-ret) << dendl;
return ret;
}