rgw_bucket& bucket)
{
if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
- ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
+ ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name
+ << " is suspended" << dendl;
return -ERR_USER_SUSPENDED;
}
rgw_obj obj;
if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) {
- ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl;
+ ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name
+ << " is suspended" << dendl;
return -ERR_USER_SUSPENDED;
}
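/* Background for the conversions in this patch (a sketch of the upstream
 * common/dout.h interface, not code added here): ldpp_dout takes a
 * DoutPrefixProvider* instead of a CephContext*, so every line is logged
 * with a request/op prefix:
 *
 *   class DoutPrefixProvider {
 *    public:
 *     virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
 *     virtual CephContext* get_cct() const = 0;
 *     virtual unsigned get_subsys() const = 0;
 *     virtual ~DoutPrefixProvider() {}
 *   };
 *
 * Both req_state and RGWOp implement this interface, which is why free
 * functions here pass `s` while RGWOp methods pass `this`. */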
if (ret != -ENOENT) {
string bucket_log;
rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_log);
- ldout(s->cct, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" << bucket_log << ")" << dendl;
+ ldpp_dout(s, 0) << "NOTICE: couldn't get bucket from bucket_name (name="
+ << bucket_log << ")" << dendl;
return ret;
}
s->bucket_exists = false;
}
if (s->bucket_exists && !store->get_zonegroup().equals(s->bucket_info.zonegroup)) {
- ldout(s->cct, 0) << "NOTICE: request for data in a different zonegroup (" << s->bucket_info.zonegroup << " != " << store->get_zonegroup().get_id() << ")" << dendl;
+ ldpp_dout(s, 0) << "NOTICE: request for data in a different zonegroup ("
+ << s->bucket_info.zonegroup << " != "
+ << store->get_zonegroup().get_id() << ")" << dendl;
/* we now need to make sure that the operation actually requires copy source, that is
* it's a copy operation
*/
acct_acl_user.display_name);
ret = 0;
} else {
- ldout(s->cct, 0) << "NOTICE: couldn't get user attrs for handling ACL (user_id="
- << s->user->user_id
- << ", ret="
- << ret
- << ")" << dendl;
+ ldpp_dout(s, 0) << "NOTICE: couldn't get user attrs for handling ACL "
+ "(user_id=" << s->user->user_id << ", ret=" << ret << ")" << dendl;
return ret;
}
}
// Really this is a can't happen condition. We parse the policy
// when it's given to us, so perhaps we should abort or otherwise
// raise bloody murder.
- lderr(s->cct) << "Error reading IAM Policy: " << e.what() << dendl;
+ ldpp_dout(s, 0) << "Error reading IAM Policy: " << e.what() << dendl;
ret = -EACCES;
}
bool success = store->get_redirect_zone_endpoint(&s->redirect_zone_endpoint);
if (success) {
- ldout(s->cct, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl;
+ ldpp_dout(s, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl;
}
return ret;
auto bliter = bl.begin();
tagset.decode(bliter);
} catch (buffer::error& err) {
- ldout(s->cct,0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
+ ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl;
return -EIO;
}
{
uint32_t required_mask = op_mask();
- ldout(s->cct, 20) << "required_mask= " << required_mask
- << " user.op_mask=" << s->user->op_mask << dendl;
+ ldpp_dout(this, 20) << "required_mask= " << required_mask
+ << " user.op_mask=" << s->user->op_mask << dendl;
if ((s->user->op_mask & required_mask) != required_mask) {
return -EPERM;
}
if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->zone_is_writeable()) {
- ldout(s->cct, 5) << "NOTICE: modify request to a read-only zone by a non-system user, permission denied" << dendl;
+ ldpp_dout(this, 5) << "NOTICE: modify request to a read-only zone by a "
+ "non-system user, permission denied" << dendl;
return -EPERM;
}
op_ret = get_obj_attrs(store, s, obj, attrs);
if (op_ret < 0) {
- ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << obj
- << " ret=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << obj
+ << " ret=" << op_ret << dendl;
return;
}
int RGWOp::do_aws4_auth_completion()
{
- ldout(s->cct, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl;
+ ldpp_dout(this, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl;
if (s->auth.completer) {
if (!s->auth.completer->complete()) {
return -ERR_AMZ_CONTENT_SHA256_MISMATCH;
} else {
- dout(10) << "v4 auth ok -- do_aws4_auth_completion" << dendl;
+ ldpp_dout(this, 10) << "v4 auth ok -- do_aws4_auth_completion" << dendl;
}
/* TODO(rzarzynski): yes, we're really called twice on PUTs. Only first
map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS);
if (aiter == s->bucket_attrs.end()) {
- ldout(s->cct, 20) << "no CORS configuration attr found" << dendl;
+ ldpp_dout(this, 20) << "no CORS configuration attr found" << dendl;
cors_exist = false;
return 0; /* no CORS configuration found */
}
try {
bucket_cors.decode(iter);
} catch (buffer::error& err) {
- ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
+ ldpp_dout(this, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
return -EIO;
}
if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors);
- ldout(s->cct, 15) << "Read RGWCORSConfiguration";
+ ldpp_dout(this, 15) << "Read RGWCORSConfiguration";
s3cors->to_xml(*_dout);
*_dout << dendl;
}
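/* note: the dout macros open a block in which `_dout` (a std::ostream*)
 * remains in scope until dendl closes it; that is what lets to_xml(*_dout)
 * above stream the XML into the same log entry before it is flushed */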
}
if (!cors_exist) {
- dout(2) << "No CORS configuration set yet for this bucket" << dendl;
+ ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
return false;
}
const off_t start_ofs,
const off_t end_ofs)
{
- ldout(s->cct, 20) << "user manifest obj=" << ent.key.name << "[" << ent.key.instance << "]" << dendl;
+ ldpp_dout(this, 20) << "user manifest obj=" << ent.key.name
+ << "[" << ent.key.instance << "]" << dendl;
RGWGetObj_CB cb(this);
RGWGetObj_Filter* filter = &cb;
boost::optional<RGWGetObj_Decompress> decompress;
RGWObjectCtx obj_ctx(store);
RGWAccessControlPolicy obj_policy(s->cct);
- ldout(s->cct, 20) << "reading obj=" << part << " ofs=" << cur_ofs << " end=" << cur_end << dendl;
+ ldpp_dout(this, 20) << "reading obj=" << part << " ofs=" << cur_ofs
+ << " end=" << cur_end << dendl;
obj_ctx.obj.set_atomic(part);
store->set_prefetch_data(&obj_ctx, part);
bool need_decompress;
op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
if (op_ret < 0) {
- lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
return -EIO;
}
if (need_decompress)
{
if (cs_info.orig_size != ent.meta.accounted_size) {
// hmm.. something wrong, object not as expected, abort!
- ldout(s->cct, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size <<
- ", actual read size=" << ent.meta.size << dendl;
+ ldpp_dout(this, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size
+ << ", actual read size=" << ent.meta.size << dendl;
return -EIO;
}
decompress.emplace(s->cct, &cs_info, partial_content, filter);
{
if (obj_size != ent.meta.size) {
// hmm.. something wrong, object not as expected, abort!
- ldout(s->cct, 0) << "ERROR: expected obj_size=" << obj_size << ", actual read size=" << ent.meta.size << dendl;
+ ldpp_dout(this, 0) << "ERROR: expected obj_size=" << obj_size
+ << ", actual read size=" << ent.meta.size << dendl;
return -EIO;
}
}
/* We can use global user_acl because LOs cannot have segments
* stored inside different accounts. */
if (s->system_request) {
- ldout(s->cct, 2) << "overriding permissions due to system operation" << dendl;
+ ldpp_dout(this, 2) << "overriding permissions due to system operation" << dendl;
} else if (s->auth.identity->is_admin_of(s->user->user_id)) {
- ldout(s->cct, 2) << "overriding permissions due to admin operation" << dendl;
+ ldpp_dout(this, 2) << "overriding permissions due to admin operation" << dendl;
} else if (!verify_object_permission(s, part, s->user_acl.get(), bucket_acl,
&obj_policy, bucket_policy, action)) {
return -EPERM;
int RGWGetObj::handle_user_manifest(const char *prefix)
{
const boost::string_view prefix_view(prefix);
- ldout(s->cct, 2) << "RGWGetObj::handle_user_manifest() prefix="
+ ldpp_dout(this, 2) << "RGWGetObj::handle_user_manifest() prefix="
<< prefix_view << dendl;
const size_t pos = prefix_view.find('/');
bucket_name, bucket_info, NULL,
&bucket_attrs);
if (r < 0) {
- ldout(s->cct, 0) << "could not get bucket info for bucket="
+ ldpp_dout(this, 0) << "could not get bucket info for bucket="
<< bucket_name << dendl;
return r;
}
bucket_acl = &_bucket_acl;
r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket);
if (r < 0) {
- ldout(s->cct, 0) << "failed to read bucket policy" << dendl;
+ ldpp_dout(this, 0) << "failed to read bucket policy" << dendl;
return r;
}
_bucket_policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs,
try {
decode(slo_info, bliter);
} catch (buffer::error& err) {
- ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl;
return -EIO;
}
- ldout(s->cct, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
+ ldpp_dout(this, 2) << "RGWGetObj::handle_slo_manifest()" << dendl;
vector<RGWAccessControlPolicy> allocated_acls;
map<string, pair<RGWAccessControlPolicy *, boost::optional<Policy>>> policies;
bucket_name, bucket_info, nullptr,
&bucket_attrs);
if (r < 0) {
- ldout(s->cct, 0) << "could not get bucket info for bucket="
+ ldpp_dout(this, 0) << "could not get bucket info for bucket="
<< bucket_name << dendl;
return r;
}
r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl,
bucket);
if (r < 0) {
- ldout(s->cct, 0) << "failed to read bucket ACL for bucket "
+ ldpp_dout(this, 0) << "failed to read bucket ACL for bucket "
<< bucket << dendl;
return r;
}
part.obj_name = obj_name;
part.size = entry.size_bytes;
part.etag = entry.etag;
- ldout(s->cct, 20) << "slo_part: ofs=" << ofs
+ ldpp_dout(this, 20) << "slo_part: ofs=" << ofs
<< " bucket=" << part.bucket
<< " obj=" << part.obj_name
<< " size=" << part.size
complete_etag(etag_sum, &lo_etag);
s->obj_size = slo_info.total_size;
- ldout(s->cct, 20) << "s->obj_size=" << s->obj_size << dendl;
+ ldpp_dout(this, 20) << "s->obj_size=" << s->obj_size << dendl;
int r = RGWRados::Object::Read::range_to_ofs(total_len, ofs, end);
if (r < 0) {
if (start_time > gc_invalidate_time) {
int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj);
if (r < 0) {
- dout(0) << "WARNING: could not defer gc entry for obj" << dendl;
+ ldpp_dout(this, 0) << "WARNING: could not defer gc entry for obj" << dendl;
}
gc_invalidate_time = start_time;
gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2);
{
attr_iter = attrs.find(RGW_ATTR_CRYPT_MODE);
if (attr_iter != attrs.end() && attr_iter->second.to_str() == "SSE-C-AES256") {
- ldout(s->cct, 0) << "ERROR: torrents are not supported for objects "
+ ldpp_dout(this, 0) << "ERROR: torrents are not supported for objects "
"encrypted with SSE-C" << dendl;
op_ret = -EINVAL;
goto done_err;
op_ret = torrent.get_torrent_file(read_op, total_len, bl, obj);
if (op_ret < 0)
{
- ldout(s->cct, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
+ ldpp_dout(this, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret
<< dendl;
goto done_err;
}
op_ret = send_response_data(bl, 0, total_len);
if (op_ret < 0)
{
- ldout(s->cct, 0) << "ERROR: failed to send_response_data ret= " << op_ret
- << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to send_response_data ret= " << op_ret << dendl;
goto done_err;
}
return;
op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
if (op_ret < 0) {
- lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
+ ldpp_dout(s, 0) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
goto done_err;
}
if (need_decompress) {
if (attr_iter != attrs.end() && !skip_manifest) {
op_ret = handle_user_manifest(attr_iter->second.c_str());
if (op_ret < 0) {
- ldout(s->cct, 0) << "ERROR: failed to handle user manifest ret="
+ ldpp_dout(this, 0) << "ERROR: failed to handle user manifest ret="
<< op_ret << dendl;
goto done_err;
}
is_slo = true;
op_ret = handle_slo_manifest(attr_iter->second);
if (op_ret < 0) {
- ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
+ ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret
<< dendl;
goto done_err;
}
if (op_ret < 0) {
/* hmm.. something wrong here.. the user was authenticated, so it
should exist */
- ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
+ ldpp_dout(this, 10) << "WARNING: failed on rgw_get_user_buckets uid="
<< s->user->user_id << dendl;
break;
}
if (!start_date.empty()) {
op_ret = utime_t::parse_date(start_date, &start_epoch, NULL);
if (op_ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: failed to parse start date" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to parse start date" << dendl;
return;
}
}
if (!end_date.empty()) {
op_ret = utime_t::parse_date(end_date, &end_epoch, NULL);
if (op_ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: failed to parse end date" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to parse end date" << dendl;
return;
}
}
op_ret = rgw_user_sync_all_stats(store, s->user->user_id);
if (op_ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: failed to sync user stats" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to sync user stats" << dendl;
return;
}
op_ret = rgw_user_get_all_buckets_stats(store, s->user->user_id, buckets_usage);
if (op_ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: failed to get user's buckets stats" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to get user's buckets stats" << dendl;
return;
}
string user_str = s->user->user_id.to_str();
op_ret = store->cls_user_get_header(user_str, &header);
if (op_ret < 0) {
- ldout(store->ctx(), 0) << "ERROR: can't read user header" << dendl;
+ ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl;
return;
}
if (op_ret < 0) {
/* hmm.. something wrong here.. the user was authenticated, so it
should exist */
- ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid="
+ ldpp_dout(this, 10) << "WARNING: failed on rgw_get_user_buckets uid="
<< s->user->user_id << dendl;
break;
} else {
if (!store->is_meta_master()) {
op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
if (op_ret < 0) {
- ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
}
}
if (op_ret < 0) {
- ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
+ ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
<< " returned err=" << op_ret << dendl;
return;
}
if (!store->is_meta_master()) {
op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
if (op_ret < 0) {
- ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl;
+ ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
}
});
if (op_ret < 0) {
- ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
+ << " returned err=" << op_ret << dendl;
return;
}
}
return op_ret;
});
if (op_ret < 0) {
- ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
+ << " returned err=" << op_ret << dendl;
return;
}
}
}
if (allow_unordered && !delimiter.empty()) {
- ldout(s->cct, 0) <<
+ ldpp_dout(this, 0) <<
"ERROR: unordered bucket listing requested with a delimiter" << dendl;
op_ret = -EINVAL;
return;
}
if (s->user->user_id.tenant != s->bucket_tenant) {
- ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
+ ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
<< " (user_id.tenant=" << s->user->user_id.tenant
<< " requested=" << s->bucket_tenant << ")"
<< dendl;
JSONParser *jp, req_info *forward_info)
{
if (!store->rest_master_conn) {
- ldout(s->cct, 0) << "rest connection is invalid" << dendl;
+ ldpp_dout(s, 0) << "rest connection is invalid" << dendl;
return -EINVAL;
}
- ldout(s->cct, 0) << "sending request to master zonegroup" << dendl;
+ ldpp_dout(s, 0) << "sending request to master zonegroup" << dendl;
bufferlist response;
string uid_str = s->user->user_id.to_str();
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
if (ret < 0)
return ret;
- ldout(s->cct, 20) << "response: " << response.c_str() << dendl;
+ ldpp_dout(s, 20) << "response: " << response.c_str() << dendl;
if (jp && !jp->parse(response.c_str(), response.length())) {
- ldout(s->cct, 0) << "failed parsing response from master zonegroup" << dendl;
+ ldpp_dout(s, 0) << "failed parsing response from master zonegroup" << dendl;
return -EINVAL;
}
if (!location_constraint.empty() &&
!store->has_zonegroup_api(location_constraint)) {
- ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
+ ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
<< " can't be found." << dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
s->err.message = "The specified location-constraint is not valid";
if (!store->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() &&
store->get_zonegroup().api_name != location_constraint) {
- ldout(s->cct, 0) << "location constraint (" << location_constraint << ")"
+ ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")"
<< " doesn't match zonegroup" << " (" << store->get_zonegroup().api_name << ")"
<< dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
const auto& zonegroup = store->get_zonegroup();
if (!placement_rule.empty() &&
!zonegroup.placement_targets.count(placement_rule)) {
- ldout(s->cct, 0) << "placement target (" << placement_rule << ")"
+ ldpp_dout(this, 0) << "placement target (" << placement_rule << ")"
<< " doesn't exist in the placement targets of zonegroup"
<< " (" << store->get_zonegroup().api_name << ")" << dendl;
op_ret = -ERR_INVALID_LOCATION_CONSTRAINT;
JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
JSONDecoder::decode_json("object_ver", objv, &jp);
JSONDecoder::decode_json("bucket_info", master_info, &jp);
- ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
- ldout(s->cct, 20) << "got creation time: << " << master_info.creation_time << dendl;
+ ldpp_dout(this, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
+ ldpp_dout(this, 20) << "got creation time: << " << master_info.creation_time << dendl;
pmaster_bucket= &master_info.bucket;
creation_time = master_info.creation_time;
pmaster_num_shards = &master_info.num_shards;
pmaster_bucket, pmaster_num_shards, true);
/* continue if EEXIST and create_bucket will fail below. this way we can
* recover from a partial create by retrying it. */
- ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl;
+ ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl;
if (op_ret && op_ret != -EEXIST)
return;
op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant,
s->bucket.name);
if (op_ret < 0) {
- ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
+ ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
<< dendl;
}
} else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
}
if (!s->bucket_exists) {
- ldout(s->cct, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
+ ldpp_dout(this, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl;
op_ret = -ERR_NO_SUCH_BUCKET;
return;
}
string err;
ver = strict_strtol(ver_str.c_str(), 10, &err);
if (!err.empty()) {
- ldout(s->cct, 0) << "failed to parse ver param" << dendl;
+ ldpp_dout(this, 0) << "failed to parse ver param" << dendl;
op_ret = -EINVAL;
return;
}
op_ret = rgw_bucket_sync_user_stats(store, s->user->user_id, s->bucket_info);
if ( op_ret < 0) {
- ldout(s->cct, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
+ ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl;
}
op_ret = store->check_bucket_empty(s->bucket_info);
op_ret = rgw_unlink_bucket(store, s->bucket_info.owner, s->bucket.tenant,
s->bucket.name, false);
if (op_ret < 0) {
- ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
+ ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret
<< dendl;
}
}
auto op_ret = get_params();
if (op_ret < 0) {
- ldout(s->cct, 20) << "get_params() returned ret=" << op_ret << dendl;
+ ldpp_dout(this, 20) << "get_params() returned ret=" << op_ret << dendl;
return op_ret;
}
part_num = s->info.args.get("partNumber");
if (part_num.empty()) {
- ldout(s->cct, 10) << "part number is empty" << dendl;
+ ldpp_dout(s, 10) << "part number is empty" << dendl;
return -EINVAL;
}
uint64_t num = (uint64_t)strict_strtol(part_num.c_str(), 10, &err);
if (!err.empty()) {
- ldout(s->cct, 10) << "bad part number: " << part_num << ": " << err << dendl;
+ ldpp_dout(s, 10) << "bad part number: " << part_num << ": " << err << dendl;
return -EINVAL;
}
bool need_decompress;
op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info);
if (op_ret < 0) {
- lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
+ ldpp_dout(s, 0) << "ERROR: failed to decode compression info, cannot decompress" << dendl;
return -EIO;
}
bool partial_content = true;
op_ret = get_system_versioning_params(s, &olh_epoch, &version_id);
if (op_ret < 0) {
- ldout(s->cct, 20) << "get_system_versioning_params() returned ret="
+ ldpp_dout(this, 20) << "get_system_versioning_params() returned ret="
<< op_ret << dendl;
goto done;
}
if (supplied_md5_b64) {
need_calc_md5 = true;
- ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
+ ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
- ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
+ ldpp_dout(this, 15) << "ceph_armor ret=" << op_ret << dendl;
if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
op_ret = -ERR_INVALID_DIGEST;
goto done;
}
buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
- ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
+ ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl;
}
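/* note: supplied_md5_b64 is the client's base64-encoded Content-MD5;
 * ceph_unarmor decodes it into the 16-byte binary digest (returning the
 * decoded length, hence the CEPH_CRYPTO_MD5_DIGESTSIZE check), and
 * buf_to_hex re-encodes it for the later comparison against the computed
 * etag */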
if (!chunked_upload) { /* with chunked upload we don't know how big is the upload.
op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
user_quota, bucket_quota, s->content_length);
if (op_ret < 0) {
- ldout(s->cct, 20) << "check_quota() returned ret=" << op_ret << dendl;
+ ldpp_dout(this, 20) << "check_quota() returned ret=" << op_ret << dendl;
goto done;
}
op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
if (op_ret < 0) {
- ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
+ ldpp_dout(this, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
goto done;
}
}
op_ret = processor->prepare(store, NULL);
if (op_ret < 0) {
- ldout(s->cct, 20) << "processor->prepare() returned ret=" << op_ret
+ ldpp_dout(this, 20) << "processor->prepare() returned ret=" << op_ret
<< dendl;
goto done;
}
op_ret = store->get_obj_state(static_cast<RGWObjectCtx *>(s->obj_ctx),
copy_source_bucket_info, obj, &astate, true, false);
if (op_ret < 0) {
- ldout(s->cct, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl;
+ ldpp_dout(this, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl;
goto done;
}
if (!astate->exists){
if (compression_type != "none") {
plugin = get_compressor_plugin(s, compression_type);
if (!plugin) {
- ldout(s->cct, 1) << "Cannot load plugin for compression type "
+ ldpp_dout(this, 1) << "Cannot load plugin for compression type "
<< compression_type << dendl;
} else {
compressor.emplace(s->cct, plugin, filter);
}
if (len < 0) {
op_ret = len;
- ldout(s->cct, 20) << "get_data() returned ret=" << op_ret << dendl;
+ ldpp_dout(this, 20) << "get_data() returned ret=" << op_ret << dendl;
goto done;
}
op_ret = put_data_and_throttle(filter, data, ofs, need_to_wait);
if (op_ret < 0) {
if (!need_to_wait || op_ret != -EEXIST) {
- ldout(s->cct, 20) << "processor->thottle_data() returned ret="
+ ldpp_dout(this, 20) << "processor->thottle_data() returned ret="
<< op_ret << dendl;
goto done;
}
/* need_to_wait == true and op_ret == -EEXIST */
- ldout(s->cct, 5) << "NOTICE: processor->throttle_data() returned -EEXIST, need to restart write" << dendl;
+ ldpp_dout(this, 5) << "NOTICE: processor->throttle_data() returned -EEXIST, need to restart write" << dendl;
/* restore original data */
data.swap(orig_data);
op_ret = processor->prepare(store, &oid_rand);
if (op_ret < 0) {
- ldout(s->cct, 0) << "ERROR: processor->prepare() returned "
+ ldpp_dout(this, 0) << "ERROR: processor->prepare() returned "
<< op_ret << dendl;
goto done;
}
op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket,
user_quota, bucket_quota, s->obj_size);
if (op_ret < 0) {
- ldout(s->cct, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
+ ldpp_dout(this, 20) << "second check_quota() returned op_ret=" << op_ret << dendl;
goto done;
}
op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota);
if (op_ret < 0) {
- ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
+ ldpp_dout(this, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl;
goto done;
}
cs_info.blocks = move(compressor->get_compression_blocks());
encode(cs_info, tmp);
attrs[RGW_ATTR_COMPRESSION] = tmp;
- ldout(s->cct, 20) << "storing " << RGW_ATTR_COMPRESSION
+ ldpp_dout(this, 20) << "storing " << RGW_ATTR_COMPRESSION
<< " with type=" << cs_info.compression_type
<< ", orig_size=" << cs_info.orig_size
<< ", blocks=" << cs_info.blocks.size() << dendl;
if (dlo_manifest) {
op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
if (op_ret < 0) {
- ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
+ ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl;
goto done;
}
complete_etag(hash, &etag);
- ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
+ ldpp_dout(this, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
}
if (slo_info) {
hash.Update((unsigned char *)slo_info->raw_data, slo_info->raw_data_len);
complete_etag(hash, &etag);
- ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
+ ldpp_dout(this, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl;
}
if (supplied_etag && etag.compare(supplied_etag) != 0) {
op_ret = torrent.complete();
if (0 != op_ret)
{
- ldout(s->cct, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl;
+ ldpp_dout(this, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl;
goto done;
}
}
if (supplied_md5_b64) {
char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1];
- ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
+ ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl;
op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1],
supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64));
- ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl;
+ ldpp_dout(this, 15) << "ceph_armor ret=" << op_ret << dendl;
if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) {
op_ret = -ERR_INVALID_DIGEST;
return;
}
buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5);
- ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl;
+ ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl;
}
RGWPutObjProcessor_Atomic processor(*static_cast<RGWObjectCtx *>(s->obj_ctx),
if (compression_type != "none") {
plugin = Compressor::create(s->cct, compression_type);
if (!plugin) {
- ldout(s->cct, 1) << "Cannot load plugin for compression type "
+ ldpp_dout(this, 1) << "Cannot load plugin for compression type "
<< compression_type << dendl;
} else {
compressor.emplace(s->cct, plugin, filter);
if (dlo_manifest) {
op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs);
if (op_ret < 0) {
- ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl;
+ ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl;
return;
}
}
try {
decode(slo_info, bliter);
} catch (buffer::error& err) {
- ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl;
return -EIO;
}
if (s->bucket_info.mfa_enabled() &&
!s->object.instance.empty() &&
!s->mfa_verified) {
- ldout(s->cct, 5) << "NOTICE: object delete request with a versioned object, mfa auth not provided" << dendl;
+ ldpp_dout(this, 5) << "NOTICE: object delete request with a versioned object, mfa auth not provided" << dendl;
return -ERR_MFA_REQUIRED;
}
if (slo_attr != attrs.end()) {
op_ret = handle_slo_manifest(slo_attr->second);
if (op_ret < 0) {
- ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl;
}
} else {
op_ret = -ERR_NOT_SLO_MANIFEST;
op_ret = get_params();
if (op_ret < 0) {
if (op_ret == -ERANGE) {
- ldout(s->cct, 4) << "The size of request xml data is larger than the max limitation, data size = "
+ ldpp_dout(this, 4) << "The size of request xml data is larger than the max limitation, data size = "
<< s->length << dendl;
op_ret = -ERR_MALFORMED_XML;
s->err.message = "The XML you provided was larger than the maximum " +
return;
}
- ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;
+ ldpp_dout(this, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;
if (!s->canned_acl.empty() && len) {
op_ret = -EINVAL;
int grants_num = req_grant_map.size();
if (grants_num > max_num) {
- ldout(s->cct, 4) << "An acl can have up to "
- << max_num
- << " grants, request acl grants num: "
- << grants_num << dendl;
+ ldpp_dout(this, 4) << "An acl can have up to " << max_num
+ << " grants, request acl grants num: " << grants_num << dendl;
op_ret = -ERR_MALFORMED_ACL_ERROR;
s->err.message = "The request is rejected, because the acl grants number you requested is larger than the maximum "
+ std::to_string(max_num)
}
op_ret = forward_request_to_master(s, NULL, store, in_data, NULL);
if (op_ret < 0) {
- ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
}
if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
- ldout(s->cct, 15) << "Old AccessControlPolicy";
+ ldpp_dout(this, 15) << "Old AccessControlPolicy";
policy->to_xml(*_dout);
*_dout << dendl;
}
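/* note: the explicit should_gather<ceph_subsys_rgw, 15>() guard skips the
 * cost of serializing the policy to XML when the subsystem log level is
 * below 15 */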
return;
if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
- ldout(s->cct, 15) << "New AccessControlPolicy:";
+ ldpp_dout(this, 15) << "New AccessControlPolicy:";
new_policy.to_xml(*_dout);
*_dout << dendl;
}
if (content_md5 == nullptr) {
op_ret = -ERR_INVALID_REQUEST;
s->err.message = "Missing required header for this request: Content-MD5";
- ldout(s->cct, 5) << s->err.message << dendl;
+ ldpp_dout(this, 5) << s->err.message << dendl;
return;
}
} catch (...) {
s->err.message = "Request header Content-MD5 contains character "
"that is not base64 encoded.";
- ldout(s->cct, 5) << s->err.message << dendl;
+ ldpp_dout(this, 5) << s->err.message << dendl;
op_ret = -ERR_BAD_DIGEST;
return;
}
if (op_ret < 0)
return;
- ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;
+ ldpp_dout(this, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl;
MD5 data_hash;
unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE];
if (memcmp(data_hash_res, content_md5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) {
op_ret = -ERR_BAD_DIGEST;
s->err.message = "The Content-MD5 you specified did not match what we received.";
- ldout(s->cct, 5) << s->err.message
+ ldpp_dout(this, 5) << s->err.message
<< " Specified content md5: " << content_md5
<< ", calculated content md5: " << data_hash_res
<< dendl;
return;
if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
- ldout(s->cct, 15) << "New LifecycleConfiguration:";
+ ldpp_dout(this, 15) << "New LifecycleConfiguration:";
new_config.to_xml(*_dout);
*_dout << dendl;
}
do {
op_ret = l.lock_exclusive(ctx, oid);
if (op_ret == -EBUSY) {
- dout(0) << "RGWLC::RGWPutLC() failed to acquire lock on " << oid << ", sleep 5, try again" << dendl;
- sleep(5);
+ ldpp_dout(this, 0) << "RGWLC::RGWPutLC() failed to acquire lock on "
+ << oid << ", sleep 5, try again" << dendl;
+ sleep(5); // XXX: return retryable error
continue;
}
if (op_ret < 0) {
- dout(0) << "RGWLC::RGWPutLC() failed to acquire lock on " << oid << ", ret=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "RGWLC::RGWPutLC() failed to acquire lock on "
+ << oid << ", ret=" << op_ret << dendl;
break;
}
op_ret = cls_rgw_lc_set_entry(*ctx, oid, entry);
if (op_ret < 0) {
- dout(0) << "RGWLC::RGWPutLC() failed to set entry on " << oid << ", ret=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "RGWLC::RGWPutLC() failed to set entry on "
+ << oid << ", ret=" << op_ret << dendl;
}
break;
}while(1);
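/* One possible shape for the XXX above -- a hedged sketch, not part of this
 * patch: instead of sleeping in the request thread, -EBUSY could be turned
 * into a retryable error for the client:
 *
 *   if (op_ret == -EBUSY) {
 *     s->err.message = "lifecycle configuration is locked, retry later";
 *     op_ret = -ERR_INTERNAL_ERROR;  // hypothetical mapping; a dedicated
 *     return;                        // retry-after error would fit better
 *   }
 */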
op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
&s->bucket_info.objv_tracker);
if (op_ret < 0) {
- ldout(s->cct, 0) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket=" << s->bucket.name
- << " returned err=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket="
+ << s->bucket.name << " returned err=" << op_ret << dendl;
return;
}
string shard_id = s->bucket.tenant + ':' + s->bucket.name + ':' + s->bucket.bucket_id;
do {
op_ret = l.lock_exclusive(ctx, oid);
if (op_ret == -EBUSY) {
- dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock on " << oid << ", sleep 5, try again" << dendl;
- sleep(5);
+ ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to acquire lock on "
+ << oid << ", sleep 5, try again" << dendl;
+ sleep(5); // XXX: return retryable error
continue;
}
if (op_ret < 0) {
- dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock on " << oid << ", ret=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to acquire lock on "
+ << oid << ", ret=" << op_ret << dendl;
break;
}
op_ret = cls_rgw_lc_rm_entry(*ctx, oid, entry);
if (op_ret < 0) {
- dout(0) << "RGWLC::RGWDeleteLC() failed to rm entry on " << oid << ", ret=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to rm entry on "
+ << oid << ", ret=" << op_ret << dendl;
}
break;
}while(1);
return;
if (!cors_exist) {
- dout(2) << "No CORS configuration set yet for this bucket" << dendl;
+ ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
op_ret = -ENOENT;
return;
}
if (!store->is_meta_master()) {
op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
if (op_ret < 0) {
- ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
}
return op_ret;
if (!cors_exist) {
- dout(2) << "No CORS configuration set yet for this bucket" << dendl;
+ ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
op_ret = -ENOENT;
return op_ret;
}
op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs,
&s->bucket_info.objv_tracker);
if (op_ret < 0) {
- ldout(s->cct, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket.name
+ ldpp_dout(this, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket.name
<< " returned err=" << op_ret << dendl;
}
return op_ret;
int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) {
rule = cc->host_name_rule(origin);
if (!rule) {
- dout(10) << "There is no cors rule present for " << origin << dendl;
+ ldpp_dout(this, 10) << "There is no cors rule present for " << origin << dendl;
return -ENOENT;
}
origin = s->info.env->get("HTTP_ORIGIN");
if (!origin) {
- dout(0) <<
- "Preflight request without mandatory Origin header"
- << dendl;
+ ldpp_dout(this, 0) << "Missing mandatory Origin header" << dendl;
op_ret = -EINVAL;
return;
}
req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD");
if (!req_meth) {
- dout(0) <<
- "Preflight request without mandatory Access-control-request-method header"
- << dendl;
+ ldpp_dout(this, 0) << "Missing mandatory Access-control-request-method header" << dendl;
op_ret = -EINVAL;
return;
}
if (!cors_exist) {
- dout(2) << "No CORS configuration set yet for this bucket" << dendl;
+ ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl;
op_ret = -ENOENT;
return;
}
op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(),
&s->bucket_attrs);
if (op_ret < 0) {
- ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
+ ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
<< " returned err=" << op_ret << dendl;
return;
}
try {
decode(*policy, bli);
} catch (buffer::error& err) {
- ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
+ ldpp_dout(s, 0) << "ERROR: could not decode policy" << dendl;
return -EIO;
}
break;
op_ret = serializer.try_lock(raw_obj.oid, dur);
if (op_ret < 0) {
- dout(0) << "RGWCompleteMultipart::execute() failed to acquire lock " << dendl;
+ ldpp_dout(this, 0) << "failed to acquire lock" << dendl;
op_ret = -ERR_INTERNAL_ERROR;
s->err.message = "This multipart completion is already in progress";
return;
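/* note: a failed try_lock is surfaced as -ERR_INTERNAL_ERROR with a
 * descriptive message, presumably because S3 defines no dedicated
 * "completion already in progress" error code */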
op_ret = get_obj_attrs(store, s, meta_obj, attrs);
if (op_ret < 0) {
- ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
+ ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj
<< " ret=" << op_ret << dendl;
return;
}
total_parts += obj_parts.size();
if (!truncated && total_parts != (int)parts->parts.size()) {
- ldout(s->cct, 0) << "NOTICE: total parts mismatch: have: " << total_parts
+ ldpp_dout(this, 0) << "NOTICE: total parts mismatch: have: " << total_parts
<< " expected: " << parts->parts.size() << dendl;
op_ret = -ERR_INVALID_PART;
return;
char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
if (iter->first != (int)obj_iter->first) {
- ldout(s->cct, 0) << "NOTICE: parts num mismatch: next requested: "
+ ldpp_dout(this, 0) << "NOTICE: parts num mismatch: next requested: "
<< iter->first << " next uploaded: "
<< obj_iter->first << dendl;
op_ret = -ERR_INVALID_PART;
}
string part_etag = rgw_string_unquote(iter->second);
if (part_etag.compare(obj_iter->second.etag) != 0) {
- ldout(s->cct, 0) << "NOTICE: etag mismatch: part: " << iter->first
+ ldpp_dout(this, 0) << "NOTICE: etag mismatch: part: " << iter->first
<< " etag: " << iter->second << dendl;
op_ret = -ERR_INVALID_PART;
return;
src_obj.init_ns(s->bucket, oid, mp_ns);
if (obj_part.manifest.empty()) {
- ldout(s->cct, 0) << "ERROR: empty manifest for object part: obj="
+ ldpp_dout(this, 0) << "ERROR: empty manifest for object part: obj="
<< src_obj << dendl;
op_ret = -ERR_INVALID_PART;
return;
if ((obj_iter != obj_parts.begin()) &&
((part_compressed != compressed) ||
(cs_info.compression_type != obj_part.cs_info.compression_type))) {
- ldout(s->cct, 0) << "ERROR: compression type was changed during multipart upload ("
+ ldpp_dout(this, 0) << "ERROR: compression type was changed during multipart upload ("
<< cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl;
op_ret = -ERR_INVALID_PART;
return;
snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
"-%lld", (long long)parts->parts.size());
etag = final_etag_str;
- ldout(s->cct, 10) << "calculated etag: " << final_etag_str << dendl;
+ ldpp_dout(this, 10) << "calculated etag: " << final_etag_str << dendl;
etag_bl.append(final_etag_str, strlen(final_etag_str));
/* serializer's exclusive lock is released */
serializer.clear_locked();
} else {
- ldout(store->ctx(), 0) << "WARNING: failed to remove object "
- << meta_obj << dendl;
+ ldpp_dout(this, 0) << "WARNING: failed to remove object " << meta_obj << dendl;
}
}
if (unlikely(serializer.locked)) {
int r = serializer.unlock();
if (r < 0) {
- ldout(store->ctx(), 0) << "WARNING: failed to unlock "
- << serializer.oid << dendl;
+ ldpp_dout(this, 0) << "WARNING: failed to unlock " << serializer.oid << dendl;
}
}
send_response();
}
}
if (has_versioned && !s->mfa_verified) {
- ldout(s->cct, 5) << "NOTICE: multi-object delete request with a versioned object, mfa auth not provided" << dendl;
+ ldpp_dout(this, 5) << "NOTICE: multi-object delete request with a versioned object, mfa auth not provided" << dendl;
op_ret = -ERR_MFA_REQUIRED;
goto error;
}
ret = rgw_unlink_bucket(store, binfo.owner, binfo.bucket.tenant,
binfo.bucket.name, false);
if (ret < 0) {
- ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << ret
- << dendl;
+ ldpp_dout(s, 0) << "WARNING: failed to unlink bucket: ret=" << ret << dendl;
}
}
if (ret < 0) {
binfo_fail:
if (-ENOENT == ret) {
- ldout(store->ctx(), 20) << "cannot find bucket = " << path.bucket_name << dendl;
+ ldpp_dout(s, 20) << "cannot find bucket = " << path.bucket_name << dendl;
num_unfound++;
} else {
- ldout(store->ctx(), 20) << "cannot get bucket info, ret = " << ret
- << dendl;
+ ldpp_dout(s, 20) << "cannot get bucket info, ret = " << ret << dendl;
fail_desc_t failed_item = {
.err = ret,
return false;
auth_fail:
- ldout(store->ctx(), 20) << "wrong auth for " << path << dendl;
+ ldpp_dout(s, 20) << "wrong auth for " << path << dendl;
{
fail_desc_t failed_item = {
.err = ret,
delop_fail:
if (-ENOENT == ret) {
- ldout(store->ctx(), 20) << "cannot find entry " << path << dendl;
+ ldpp_dout(s, 20) << "cannot find entry " << path << dendl;
num_unfound++;
} else {
fail_desc_t failed_item = {
bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths)
{
- ldout(store->ctx(), 20) << "in delete_chunk" << dendl;
+ ldpp_dout(s, 20) << "in delete_chunk" << dendl;
for (auto path : paths) {
- ldout(store->ctx(), 20) << "bulk deleting path: " << path << dendl;
+ ldpp_dout(s, 20) << "bulk deleting path: " << path << dendl;
delete_single(path);
}
}
if (s->user->user_id.tenant != s->bucket_tenant) {
- ldout(s->cct, 10) << "user cannot create a bucket in a different tenant"
- << " (user_id.tenant=" << s->user->user_id.tenant
- << " requested=" << s->bucket_tenant << ")"
- << dendl;
+ ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant"
+ << " (user_id.tenant=" << s->user->user_id.tenant
+ << " requested=" << s->bucket_tenant << ")" << dendl;
return -EACCES;
}
int RGWBulkUploadOp::handle_dir(const boost::string_ref path)
{
- ldout(s->cct, 20) << "bulk upload: got directory=" << path << dendl;
+ ldpp_dout(this, 20) << "got directory=" << path << dendl;
op_ret = handle_dir_verify_permission();
if (op_ret < 0) {
JSONDecoder::decode_json("object_ver", objv, &jp);
JSONDecoder::decode_json("bucket_info", master_info, &jp);
- ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver="
- << objv.ver << dendl;
- ldout(s->cct, 20) << "got creation_time="<< master_info.creation_time
- << dendl;
+ ldpp_dout(this, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
+ ldpp_dout(this, 20) << "got creation_time="<< master_info.creation_time << dendl;
pmaster_bucket= &master_info.bucket;
creation_time = master_info.creation_time;
nullptr);
if (selected_placement_rule != binfo.placement_rule) {
op_ret = -EEXIST;
- ldout(s->cct, 20) << "bulk upload: non-coherent placement rule" << dendl;
+ ldpp_dout(this, 20) << "non-coherent placement rule" << dendl;
return op_ret;
}
}
pmaster_bucket, pmaster_num_shards, true);
/* continue if EEXIST and create_bucket will fail below. this way we can
* recover from a partial create by retrying it. */
- ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret
- << ", bucket=" << bucket << dendl;
+ ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret
+ << ", bucket=" << bucket << dendl;
if (op_ret && op_ret != -EEXIST) {
return op_ret;
*/
if (out_info.owner.compare(s->user->user_id) != 0) {
op_ret = -EEXIST;
- ldout(s->cct, 20) << "bulk upload: conflicting bucket name" << dendl;
+ ldpp_dout(this, 20) << "conflicting bucket name" << dendl;
return op_ret;
}
bucket = out_info.bucket;
op_ret = rgw_unlink_bucket(store, s->user->user_id,
bucket.tenant, bucket.name);
if (op_ret < 0) {
- ldout(s->cct, 0) << "bulk upload: WARNING: failed to unlink bucket: ret="
- << op_ret << dendl;
+ ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret << dendl;
}
} else if (op_ret == -EEXIST || (op_ret == 0 && existed)) {
- ldout(s->cct, 20) << "bulk upload: containers already exists"
- << dendl;
+ ldpp_dout(this, 20) << "containers already exists" << dendl;
op_ret = -ERR_BUCKET_EXISTS;
}
RGWAccessControlPolicy bacl(store->ctx());
op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket);
if (op_ret < 0) {
- ldout(s->cct, 20) << "bulk upload: cannot read_policy() for bucket"
- << dendl;
+ ldpp_dout(this, 20) << "cannot read_policy() for bucket" << dendl;
return false;
}
AlignedStreamGetter& body)
{
- ldout(s->cct, 20) << "bulk upload: got file=" << path << ", size=" << size
- << dendl;
+ ldpp_dout(this, 20) << "got file=" << path << ", size=" << size << dendl;
RGWPutObjDataProcessor *filter = nullptr;
boost::optional<RGWPutObj_Compress> compressor;
op_ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant,
bucket_name, binfo, nullptr, &battrs);
if (op_ret == -ENOENT) {
- ldout(s->cct, 20) << "bulk upload: non existent directory=" << bucket_name
- << dendl;
+ ldpp_dout(this, 20) << "non existent directory=" << bucket_name << dendl;
} else if (op_ret < 0) {
return op_ret;
}
if (! handle_file_verify_permission(binfo,
rgw_obj(binfo.bucket, object),
battrs, bowner)) {
- ldout(s->cct, 20) << "bulk upload: object creation unauthorized" << dendl;
+ ldpp_dout(this, 20) << "object creation unauthorized" << dendl;
op_ret = -EACCES;
return op_ret;
}
op_ret = processor.prepare(store, nullptr);
if (op_ret < 0) {
- ldout(s->cct, 20) << "bulk upload: cannot prepare processor due to ret="
- << op_ret << dendl;
+ ldpp_dout(this, 20) << "cannot prepare processor due to ret=" << op_ret << dendl;
return op_ret;
}
if (compression_type != "none") {
plugin = Compressor::create(s->cct, compression_type);
if (! plugin) {
- ldout(s->cct, 1) << "Cannot load plugin for rgw_compression_type "
- << compression_type << dendl;
+ ldpp_dout(this, 1) << "Cannot load plugin for rgw_compression_type "
+ << compression_type << dendl;
} else {
compressor.emplace(s->cct, plugin, filter);
filter = &*compressor;
ceph::bufferlist data;
len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data);
- ldout(s->cct, 20) << "bulk upload: body=" << data.c_str() << dendl;
+ ldpp_dout(this, 20) << "body=" << data.c_str() << dendl;
if (len < 0) {
op_ret = len;
return op_ret;
hash.Update((const unsigned char *)data.c_str(), data.length());
op_ret = put_data_and_throttle(filter, data, ofs, false);
if (op_ret < 0) {
- ldout(s->cct, 20) << "processor->thottle_data() returned ret="
- << op_ret << dendl;
+ ldpp_dout(this, 20) << "processor->thottle_data() returned ret=" << op_ret << dendl;
return op_ret;
}
} while (len > 0);
if (ofs != size) {
- ldout(s->cct, 10) << "bulk upload: real file size different from declared"
- << dendl;
+ ldpp_dout(this, 10) << "real file size different from declared" << dendl;
op_ret = -EINVAL;
}
op_ret = store->check_quota(bowner.get_id(), binfo.bucket,
user_quota, bucket_quota, size);
if (op_ret < 0) {
- ldout(s->cct, 20) << "bulk upload: quota exceeded for path=" << path
- << dendl;
+ ldpp_dout(this, 20) << "quota exceeded for path=" << path << dendl;
return op_ret;
}
op_ret = processor.complete(size, etag, nullptr, ceph::real_time(), attrs,
ceph::real_time() /* delete_at */);
if (op_ret < 0) {
- ldout(s->cct, 20) << "bulk upload: processor::complete returned op_ret="
- << op_ret << dendl;
+ ldpp_dout(this, 20) << "processor::complete returned op_ret=" << op_ret << dendl;
}
return op_ret;
{
ceph::bufferlist buffer(64 * 1024);
- ldout(s->cct, 20) << "bulk upload: start" << dendl;
+ ldpp_dout(this, 20) << "start" << dendl;
/* Create an instance of stream-abstracting class. Having this indirection
* allows for easy introduction of decompressors like gzip and bzip2. */
do {
op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer);
if (op_ret < 0) {
- ldout(s->cct, 2) << "bulk upload: cannot read header" << dendl;
+ ldpp_dout(this, 2) << "cannot read header" << dendl;
return;
}
* will be ignored but won't cease the whole upload. */
switch (header->get_filetype()) {
case rgw::tar::FileType::NORMAL_FILE: {
- ldout(s->cct, 2) << "bulk upload: handling regular file" << dendl;
+ ldpp_dout(this, 2) << "handling regular file" << dendl;
boost::string_ref filename = bucket_path.empty() ? header->get_filename() : \
file_prefix + header->get_filename().to_string();
break;
}
case rgw::tar::FileType::DIRECTORY: {
- ldout(s->cct, 2) << "bulk upload: handling regular directory" << dendl;
+ ldpp_dout(this, 2) << "handling regular directory" << dendl;
boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path;
op_ret = handle_dir(dirname);
* terminates whole upload immediately. */
if (boost::algorithm::contains(std::initializer_list<int>{ op_ret },
terminal_errors)) {
- ldout(s->cct, 2) << "bulk upload: terminating due to ret=" << op_ret
- << dendl;
+ ldpp_dout(this, 2) << "terminating due to ret=" << op_ret << dendl;
break;
}
} else {
- ldout(s->cct, 2) << "bulk upload: an empty block" << dendl;
+ ldpp_dout(this, 2) << "an empty block" << dendl;
op_ret = 0;
}
{
op_ret = get_params();
if (op_ret < 0) {
- ldout(s->cct, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl;
+ ldpp_dout(this, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl;
return;
}
op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
if (op_ret < 0) {
- ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
+ << " returned err=" << op_ret << dendl;
return;
}
}
op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs);
if (op_ret < 0) {
- ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl;
+ ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name
+ << " returned err=" << op_ret << dendl;
return;
}
}
{
int ret = rgw_build_bucket_policies(store, s);
if (ret < 0) {
- ldout(s->cct, 10) << "init_permissions on " << s->bucket
+ ldpp_dout(s, 10) << "init_permissions on " << s->bucket
<< " failed, ret=" << ret << dendl;
return ret==-ENODATA ? -EACCES : ret;
}
int ret = rgw_build_object_policies(store, s, op->prefetch_data());
if (ret < 0) {
- ldout(s->cct, 10) << "read_permissions on " << s->bucket << ":"
+ ldpp_dout(op, 10) << "read_permissions on " << s->bucket << ":"
<< s->object << " only_bucket=" << only_bucket
<< " ret=" << ret << dendl;
if (ret == -ENODATA)
if (!store->is_meta_master()) {
op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr);
if (op_ret < 0) {
- ldout(s->cct, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
+ ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
}
return op_ret;
});
} catch (rgw::IAM::PolicyParseException& e) {
- ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl;
+ ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl;
op_ret = -EINVAL;
}
}
auto attrs = s->bucket_attrs;
map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_IAM_POLICY);
if (aiter == attrs.end()) {
- ldout(s->cct, 0) << __func__ << " can't find bucket IAM POLICY attr"
- << " bucket_name = " << s->bucket_name << dendl;
+ ldpp_dout(this, 0) << "can't find bucket IAM POLICY attr bucket_name = "
+ << s->bucket_name << dendl;
op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
s->err.message = "The bucket policy does not exist";
return;
policy = attrs[RGW_ATTR_IAM_POLICY];
if (policy.length() == 0) {
- ldout(s->cct, 10) << "The bucket policy does not exist, bucket: " << s->bucket_name << dendl;
+ ldpp_dout(this, 10) << "The bucket policy does not exist, bucket: "
+ << s->bucket_name << dendl;
op_ret = -ERR_NO_SUCH_BUCKET_POLICY;
s->err.message = "The bucket policy does not exist";
return;