From: Casey Bodley
Date: Thu, 26 Apr 2018 19:12:02 +0000 (-0400)
Subject: rgw: rgw ops log with ldpp_dout()
X-Git-Tag: v14.0.0~47^2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=ab200271bfe6e9fb5e4b7ac112c28a18420a1cff;p=ceph.git

rgw: rgw ops log with ldpp_dout()

Signed-off-by: Casey Bodley
---

diff --git a/src/rgw/rgw_op.cc b/src/rgw/rgw_op.cc
index f20a51a11334..6a0022f022aa 100644
--- a/src/rgw/rgw_op.cc
+++ b/src/rgw/rgw_op.cc
@@ -302,7 +302,8 @@ static int read_bucket_policy(RGWRados *store, rgw_bucket& bucket) { if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) { - ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl; + ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name + << " is suspended" << dendl; return -ERR_USER_SUSPENDED; } @@ -332,7 +333,8 @@ static int read_obj_policy(RGWRados *store, rgw_obj obj; if (!s->system_request && bucket_info.flags & BUCKET_SUSPENDED) { - ldout(s->cct, 0) << "NOTICE: bucket " << bucket_info.bucket.name << " is suspended" << dendl; + ldpp_dout(s, 0) << "NOTICE: bucket " << bucket_info.bucket.name + << " is suspended" << dendl; return -ERR_USER_SUSPENDED; } @@ -446,7 +448,8 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s) if (ret != -ENOENT) { string bucket_log; rgw_make_bucket_entry_name(s->bucket_tenant, s->bucket_name, bucket_log); - ldout(s->cct, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" << bucket_log << ")" << dendl; + ldpp_dout(s, 0) << "NOTICE: couldn't get bucket from bucket_name (name=" + << bucket_log << ")" << dendl; return ret; } s->bucket_exists = false; @@ -486,7 +489,9 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s) } if (s->bucket_exists && !store->get_zonegroup().equals(s->bucket_info.zonegroup)) { - ldout(s->cct, 0) << "NOTICE: request for data in a different zonegroup (" << s->bucket_info.zonegroup << " != " << store->get_zonegroup().get_id() << ")" << dendl; + ldpp_dout(s, 0) << "NOTICE: request for data in a different zonegroup (" + << s->bucket_info.zonegroup << " != " + << store->get_zonegroup().get_id() << ")" << dendl; /* we now need to make sure that the operation actually requires copy source, that is * it's a copy operation */ @@ -522,11 +527,8 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s) acct_acl_user.display_name); ret = 0; } else { - ldout(s->cct, 0) << "NOTICE: couldn't get user attrs for handling ACL (user_id=" - << s->user->user_id - << ", ret=" - << ret - << ")" << dendl; + ldpp_dout(s, 0) << "NOTICE: couldn't get user attrs for handling ACL " "(user_id=" << s->user->user_id << ", ret=" << ret << ")" << dendl; return ret; } } @@ -538,13 +540,13 @@ int rgw_build_bucket_policies(RGWRados* store, struct req_state* s) // Really this is a can't happen condition. We parse the policy // when it's given to us, so perhaps we should abort or otherwise // raise bloody murder.
- lderr(s->cct) << "Error reading IAM Policy: " << e.what() << dendl; + ldpp_dout(s, 0) << "Error reading IAM Policy: " << e.what() << dendl; ret = -EACCES; } bool success = store->get_redirect_zone_endpoint(&s->redirect_zone_endpoint); if (success) { - ldout(s->cct, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl; + ldpp_dout(s, 20) << "redirect_zone_endpoint=" << s->redirect_zone_endpoint << dendl; } return ret; @@ -593,7 +595,7 @@ static int rgw_iam_add_tags_from_bl(struct req_state* s, bufferlist& bl){ auto bliter = bl.begin(); tagset.decode(bliter); } catch (buffer::error& err) { - ldout(s->cct,0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl; + ldpp_dout(s, 0) << "ERROR: caught buffer::error, couldn't decode TagSet" << dendl; return -EIO; } @@ -769,15 +771,16 @@ int RGWOp::verify_op_mask() { uint32_t required_mask = op_mask(); - ldout(s->cct, 20) << "required_mask= " << required_mask - << " user.op_mask=" << s->user->op_mask << dendl; + ldpp_dout(this, 20) << "required_mask= " << required_mask + << " user.op_mask=" << s->user->op_mask << dendl; if ((s->user->op_mask & required_mask) != required_mask) { return -EPERM; } if (!s->system_request && (required_mask & RGW_OP_TYPE_MODIFY) && !store->zone_is_writeable()) { - ldout(s->cct, 5) << "NOTICE: modify request to a read-only zone by a non-system user, permission denied" << dendl; + ldpp_dout(this, 5) << "NOTICE: modify request to a read-only zone by a " + "non-system user, permission denied" << dendl; return -EPERM; } @@ -818,8 +821,8 @@ void RGWGetObjTags::execute() op_ret = get_obj_attrs(store, s, obj, attrs); if (op_ret < 0) { - ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << obj - << " ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << obj + << " ret=" << op_ret << dendl; return; } @@ -908,12 +911,12 @@ void RGWDeleteObjTags::execute() int RGWOp::do_aws4_auth_completion() { - ldout(s->cct, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl; + ldpp_dout(this, 5) << "NOTICE: call to do_aws4_auth_completion" << dendl; if (s->auth.completer) { if (!s->auth.completer->complete()) { return -ERR_AMZ_CONTENT_SHA256_MISMATCH; } else { - dout(10) << "v4 auth ok -- do_aws4_auth_completion" << dendl; + ldpp_dout(this, 10) << "v4 auth ok -- do_aws4_auth_completion" << dendl; } /* TODO(rzarzynski): yes, we're really called twice on PUTs. 
Only first @@ -1014,7 +1017,7 @@ int RGWOp::read_bucket_cors() map<string, bufferlist>::iterator aiter = s->bucket_attrs.find(RGW_ATTR_CORS); if (aiter == s->bucket_attrs.end()) { - ldout(s->cct, 20) << "no CORS configuration attr found" << dendl; + ldpp_dout(this, 20) << "no CORS configuration attr found" << dendl; cors_exist = false; return 0; /* no CORS configuration found */ } @@ -1027,12 +1030,12 @@ int RGWOp::read_bucket_cors() try { bucket_cors.decode(iter); } catch (buffer::error& err) { - ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; + ldpp_dout(this, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; return -EIO; } if (s->cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) { RGWCORSConfiguration_S3 *s3cors = static_cast<RGWCORSConfiguration_S3 *>(&bucket_cors); - ldout(s->cct, 15) << "Read RGWCORSConfiguration"; + ldpp_dout(this, 15) << "Read RGWCORSConfiguration"; s3cors->to_xml(*_dout); *_dout << dendl; } @@ -1082,7 +1085,7 @@ bool RGWOp::generate_cors_headers(string& origin, string& method, string& header } if (!cors_exist) { - dout(2) << "No CORS configuration set yet for this bucket" << dendl; + ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl; return false; } @@ -1133,7 +1136,8 @@ int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket, const off_t start_ofs, const off_t end_ofs) { - ldout(s->cct, 20) << "user manifest obj=" << ent.key.name << "[" << ent.key.instance << "]" << dendl; + ldpp_dout(this, 20) << "user manifest obj=" << ent.key.name + << "[" << ent.key.instance << "]" << dendl; RGWGetObj_CB cb(this); RGWGetObj_Filter* filter = &cb; boost::optional<RGWGetObj_Decompress> decompress; @@ -1149,7 +1153,8 @@ int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket, RGWObjectCtx obj_ctx(store); RGWAccessControlPolicy obj_policy(s->cct); - ldout(s->cct, 20) << "reading obj=" << part << " ofs=" << cur_ofs << " end=" << cur_end << dendl; + ldpp_dout(this, 20) << "reading obj=" << part << " ofs=" << cur_ofs + << " end=" << cur_end << dendl; obj_ctx.obj.set_atomic(part); store->set_prefetch_data(&obj_ctx, part); @@ -1170,16 +1175,16 @@ int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket, bool need_decompress; op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info); if (op_ret < 0) { - lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl; - return -EIO; + ldpp_dout(this, 0) << "ERROR: failed to decode compression info" << dendl; + return -EIO; } if (need_decompress) { if (cs_info.orig_size != ent.meta.accounted_size) { // hmm.. something wrong, object not as expected, abort! - ldout(s->cct, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size << - ", actual read size=" << ent.meta.size << dendl; + ldpp_dout(this, 0) << "ERROR: expected cs_info.orig_size=" << cs_info.orig_size + << ", actual read size=" << ent.meta.size << dendl; return -EIO; } decompress.emplace(s->cct, &cs_info, partial_content, filter); @@ -1189,7 +1194,8 @@ int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket, { if (obj_size != ent.meta.size) { // hmm.. something wrong, object not as expected, abort! - ldout(s->cct, 0) << "ERROR: expected obj_size=" << obj_size << ", actual read size=" << ent.meta.size << dendl; + ldpp_dout(this, 0) << "ERROR: expected obj_size=" << obj_size + << ", actual read size=" << ent.meta.size << dendl; return -EIO; } } @@ -1201,9 +1207,9 @@ int RGWGetObj::read_user_manifest_part(rgw_bucket& bucket, /* We can use global user_acl because LOs cannot have segments * stored inside different accounts.
*/ if (s->system_request) { - ldout(s->cct, 2) << "overriding permissions due to system operation" << dendl; + ldpp_dout(this, 2) << "overriding permissions due to system operation" << dendl; } else if (s->auth.identity->is_admin_of(s->user->user_id)) { - ldout(s->cct, 2) << "overriding permissions due to admin operation" << dendl; + ldpp_dout(this, 2) << "overriding permissions due to admin operation" << dendl; } else if (!verify_object_permission(s, part, s->user_acl.get(), bucket_acl, &obj_policy, bucket_policy, action)) { return -EPERM; } @@ -1414,7 +1420,7 @@ static int get_obj_user_manifest_iterate_cb(rgw_bucket& bucket, int RGWGetObj::handle_user_manifest(const char *prefix) { const boost::string_view prefix_view(prefix); - ldout(s->cct, 2) << "RGWGetObj::handle_user_manifest() prefix=" + ldpp_dout(this, 2) << "RGWGetObj::handle_user_manifest() prefix=" << prefix_view << dendl; const size_t pos = prefix_view.find('/'); @@ -1441,7 +1447,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix) bucket_name, bucket_info, NULL, &bucket_attrs); if (r < 0) { - ldout(s->cct, 0) << "could not get bucket info for bucket=" + ldpp_dout(this, 0) << "could not get bucket info for bucket=" << bucket_name << dendl; return r; } @@ -1450,7 +1456,7 @@ int RGWGetObj::handle_user_manifest(const char *prefix) bucket_acl = &_bucket_acl; r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket); if (r < 0) { - ldout(s->cct, 0) << "failed to read bucket policy" << dendl; + ldpp_dout(this, 0) << "failed to read bucket policy" << dendl; return r; } _bucket_policy = get_iam_policy_from_attr(s->cct, store, bucket_attrs, @@ -1517,10 +1523,10 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl) try { decode(slo_info, bliter); } catch (buffer::error& err) { - ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl; return -EIO; } - ldout(s->cct, 2) << "RGWGetObj::handle_slo_manifest()" << dendl; + ldpp_dout(this, 2) << "RGWGetObj::handle_slo_manifest()" << dendl; vector<RGWAccessControlPolicy> allocated_acls; map<string, pair<RGWAccessControlPolicy *, boost::optional<Policy>>> policies; @@ -1575,7 +1581,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl) bucket_name, bucket_info, nullptr, &bucket_attrs); if (r < 0) { - ldout(s->cct, 0) << "could not get bucket info for bucket=" + ldpp_dout(this, 0) << "could not get bucket info for bucket=" << bucket_name << dendl; return r; } @@ -1584,7 +1590,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl) r = read_bucket_policy(store, s, bucket_info, bucket_attrs, bucket_acl, bucket); if (r < 0) { - ldout(s->cct, 0) << "failed to read bucket ACL for bucket " + ldpp_dout(this, 0) << "failed to read bucket ACL for bucket " << bucket << dendl; return r; } @@ -1607,7 +1613,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl) part.obj_name = obj_name; part.size = entry.size_bytes; part.etag = entry.etag; - ldout(s->cct, 20) << "slo_part: ofs=" << ofs + ldpp_dout(this, 20) << "slo_part: ofs=" << ofs << " bucket=" << part.bucket << " obj=" << part.obj_name << " size=" << part.size @@ -1624,7 +1630,7 @@ int RGWGetObj::handle_slo_manifest(bufferlist& bl) complete_etag(etag_sum, &lo_etag); s->obj_size = slo_info.total_size; - ldout(s->cct, 20) << "s->obj_size=" << s->obj_size << dendl; + ldpp_dout(this, 20) << "s->obj_size=" << s->obj_size << dendl; int r = RGWRados::Object::Read::range_to_ofs(total_len, ofs, end); if (r < 0) { @@ -1649,7 +1655,7 @@ int RGWGetObj::get_data_cb(bufferlist& bl, off_t bl_ofs, off_t bl_len) if (start_time >
gc_invalidate_time) { int r = store->defer_gc(s->obj_ctx, s->bucket_info, obj); if (r < 0) { - dout(0) << "WARNING: could not defer gc entry for obj" << dendl; + ldpp_dout(this, 0) << "WARNING: could not defer gc entry for obj" << dendl; } gc_invalidate_time = start_time; gc_invalidate_time += (s->cct->_conf->rgw_gc_obj_min_wait / 2); @@ -1762,7 +1768,7 @@ void RGWGetObj::execute() { attr_iter = attrs.find(RGW_ATTR_CRYPT_MODE); if (attr_iter != attrs.end() && attr_iter->second.to_str() == "SSE-C-AES256") { - ldout(s->cct, 0) << "ERROR: torrents are not supported for objects " + ldpp_dout(this, 0) << "ERROR: torrents are not supported for objects " "encrypted with SSE-C" << dendl; op_ret = -EINVAL; goto done_err; @@ -1771,15 +1777,14 @@ void RGWGetObj::execute() op_ret = torrent.get_torrent_file(read_op, total_len, bl, obj); if (op_ret < 0) { - ldout(s->cct, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret + ldpp_dout(this, 0) << "ERROR: failed to get_torrent_file ret= " << op_ret << dendl; goto done_err; } op_ret = send_response_data(bl, 0, total_len); if (op_ret < 0) { - ldout(s->cct, 0) << "ERROR: failed to send_response_data ret= " << op_ret - << dendl; + ldpp_dout(this, 0) << "ERROR: failed to send_response_data ret= " << op_ret << dendl; goto done_err; } return; @@ -1788,7 +1793,7 @@ void RGWGetObj::execute() op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info); if (op_ret < 0) { - lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl; + ldpp_dout(s, 0) << "ERROR: failed to decode compression info, cannot decompress" << dendl; goto done_err; } if (need_decompress) { @@ -1801,7 +1806,7 @@ void RGWGetObj::execute() if (attr_iter != attrs.end() && !skip_manifest) { op_ret = handle_user_manifest(attr_iter->second.c_str()); if (op_ret < 0) { - ldout(s->cct, 0) << "ERROR: failed to handle user manifest ret=" + ldpp_dout(this, 0) << "ERROR: failed to handle user manifest ret=" << op_ret << dendl; goto done_err; } @@ -1813,7 +1818,7 @@ void RGWGetObj::execute() is_slo = true; op_ret = handle_slo_manifest(attr_iter->second); if (op_ret < 0) { - ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret + ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl; goto done_err; } @@ -1961,7 +1966,7 @@ void RGWListBuckets::execute() if (op_ret < 0) { /* hmm.. something wrong here.. 
the user was authenticated, so it should exist */ - ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid=" + ldpp_dout(this, 10) << "WARNING: failed on rgw_get_user_buckets uid=" << s->user->user_id << dendl; break; } @@ -2026,7 +2031,7 @@ void RGWGetUsage::execute() if (!start_date.empty()) { op_ret = utime_t::parse_date(start_date, &start_epoch, NULL); if (op_ret < 0) { - ldout(store->ctx(), 0) << "ERROR: failed to parse start date" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to parse start date" << dendl; return; } } @@ -2034,7 +2039,7 @@ void RGWGetUsage::execute() if (!end_date.empty()) { op_ret = utime_t::parse_date(end_date, &end_epoch, NULL); if (op_ret < 0) { - ldout(store->ctx(), 0) << "ERROR: failed to parse end date" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to parse end date" << dendl; return; } } @@ -2061,20 +2066,20 @@ void RGWGetUsage::execute() op_ret = rgw_user_sync_all_stats(store, s->user->user_id); if (op_ret < 0) { - ldout(store->ctx(), 0) << "ERROR: failed to sync user stats" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to sync user stats" << dendl; return; } op_ret = rgw_user_get_all_buckets_stats(store, s->user->user_id, buckets_usage); if (op_ret < 0) { - ldout(store->ctx(), 0) << "ERROR: failed to get user's buckets stats" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to get user's buckets stats" << dendl; return; } string user_str = s->user->user_id.to_str(); op_ret = store->cls_user_get_header(user_str, &header); if (op_ret < 0) { - ldout(store->ctx(), 0) << "ERROR: can't read user header" << dendl; + ldpp_dout(this, 0) << "ERROR: can't read user header" << dendl; return; } @@ -2104,7 +2109,7 @@ void RGWStatAccount::execute() if (op_ret < 0) { /* hmm.. something wrong here.. the user was authenticated, so it should exist */ - ldout(s->cct, 10) << "WARNING: failed on rgw_get_user_buckets uid=" + ldpp_dout(this, 10) << "WARNING: failed on rgw_get_user_buckets uid=" << s->user->user_id << dendl; break; } else { @@ -2184,7 +2189,7 @@ void RGWSetBucketVersioning::execute() if (!store->is_meta_master()) { op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr); if (op_ret < 0) { - ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } } @@ -2219,7 +2224,7 @@ void RGWSetBucketVersioning::execute() } if (op_ret < 0) { - ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name + ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl; return; } @@ -2262,7 +2267,7 @@ void RGWSetBucketWebsite::execute() if (!store->is_meta_master()) { op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr); if (op_ret < 0) { - ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << " forward_request_to_master returned ret=" << op_ret << dendl; return; } } @@ -2276,7 +2281,8 @@ void RGWSetBucketWebsite::execute() }); if (op_ret < 0) { - ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl; + ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name + << " returned err=" << op_ret << dendl; return; } } @@ -2301,7 +2307,8 @@ void RGWDeleteBucketWebsite::execute() return op_ret; }); if (op_ret < 0) { - ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " 
returned err=" << op_ret << dendl; + ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name + << " returned err=" << op_ret << dendl; return; } } @@ -2403,7 +2410,7 @@ void RGWListBucket::execute() } if (allow_unordered && !delimiter.empty()) { - ldout(s->cct, 0) << + ldpp_dout(this, 0) << "ERROR: unordered bucket listing requested with a delimiter" << dendl; op_ret = -EINVAL; return; @@ -2462,7 +2469,7 @@ int RGWCreateBucket::verify_permission() } if (s->user->user_id.tenant != s->bucket_tenant) { - ldout(s->cct, 10) << "user cannot create a bucket in a different tenant" + ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant" << " (user_id.tenant=" << s->user->user_id.tenant << " requested=" << s->bucket_tenant << ")" << dendl; @@ -2496,10 +2503,10 @@ static int forward_request_to_master(struct req_state *s, obj_version *objv, JSONParser *jp, req_info *forward_info) { if (!store->rest_master_conn) { - ldout(s->cct, 0) << "rest connection is invalid" << dendl; + ldpp_dout(s, 0) << "rest connection is invalid" << dendl; return -EINVAL; } - ldout(s->cct, 0) << "sending request to master zonegroup" << dendl; + ldpp_dout(s, 0) << "sending request to master zonegroup" << dendl; bufferlist response; string uid_str = s->user->user_id.to_str(); #define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response @@ -2508,9 +2515,9 @@ static int forward_request_to_master(struct req_state *s, obj_version *objv, if (ret < 0) return ret; - ldout(s->cct, 20) << "response: " << response.c_str() << dendl; + ldpp_dout(s, 20) << "response: " << response.c_str() << dendl; if (jp && !jp->parse(response.c_str(), response.length())) { - ldout(s->cct, 0) << "failed parsing response from master zonegroup" << dendl; + ldpp_dout(s, 0) << "failed parsing response from master zonegroup" << dendl; return -EINVAL; } @@ -2705,7 +2712,7 @@ void RGWCreateBucket::execute() if (!location_constraint.empty() && !store->has_zonegroup_api(location_constraint)) { - ldout(s->cct, 0) << "location constraint (" << location_constraint << ")" + ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")" << " can't be found." 
<< dendl; op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; s->err.message = "The specified location-constraint is not valid"; @@ -2714,7 +2721,7 @@ void RGWCreateBucket::execute() if (!store->get_zonegroup().is_master_zonegroup() && !location_constraint.empty() && store->get_zonegroup().api_name != location_constraint) { - ldout(s->cct, 0) << "location constraint (" << location_constraint << ")" + ldpp_dout(this, 0) << "location constraint (" << location_constraint << ")" << " doesn't match zonegroup" << " (" << store->get_zonegroup().api_name << ")" << dendl; op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; @@ -2725,7 +2732,7 @@ void RGWCreateBucket::execute() const auto& zonegroup = store->get_zonegroup(); if (!placement_rule.empty() && !zonegroup.placement_targets.count(placement_rule)) { - ldout(s->cct, 0) << "placement target (" << placement_rule << ")" + ldpp_dout(this, 0) << "placement target (" << placement_rule << ")" << " doesn't exist in the placement targets of zonegroup" << " (" << store->get_zonegroup().api_name << ")" << dendl; op_ret = -ERR_INVALID_LOCATION_CONSTRAINT; @@ -2770,8 +2777,8 @@ void RGWCreateBucket::execute() JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp); JSONDecoder::decode_json("object_ver", objv, &jp); JSONDecoder::decode_json("bucket_info", master_info, &jp); - ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl; - ldout(s->cct, 20) << "got creation time: << " << master_info.creation_time << dendl; + ldpp_dout(this, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl; + ldpp_dout(this, 20) << "got creation time: << " << master_info.creation_time << dendl; pmaster_bucket= &master_info.bucket; creation_time = master_info.creation_time; pmaster_num_shards = &master_info.num_shards; @@ -2857,7 +2864,7 @@ void RGWCreateBucket::execute() pmaster_bucket, pmaster_num_shards, true); /* continue if EEXIST and create_bucket will fail below. this way we can * recover from a partial create by retrying it. 
*/ - ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl; + ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret << " bucket=" << s->bucket << dendl; if (op_ret && op_ret != -EEXIST) return; @@ -2885,7 +2892,7 @@ void RGWCreateBucket::execute() op_ret = rgw_unlink_bucket(store, s->user->user_id, s->bucket.tenant, s->bucket.name); if (op_ret < 0) { - ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret + ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret << dendl; } } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) { @@ -2973,7 +2980,7 @@ void RGWDeleteBucket::execute() } if (!s->bucket_exists) { - ldout(s->cct, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl; + ldpp_dout(this, 0) << "ERROR: bucket " << s->bucket_name << " not found" << dendl; op_ret = -ERR_NO_SUCH_BUCKET; return; } @@ -2989,7 +2996,7 @@ void RGWDeleteBucket::execute() string err; ver = strict_strtol(ver_str.c_str(), 10, &err); if (!err.empty()) { - ldout(s->cct, 0) << "failed to parse ver param" << dendl; + ldpp_dout(this, 0) << "failed to parse ver param" << dendl; op_ret = -EINVAL; return; } @@ -2999,7 +3006,7 @@ void RGWDeleteBucket::execute() op_ret = rgw_bucket_sync_user_stats(store, s->user->user_id, s->bucket_info); if ( op_ret < 0) { - ldout(s->cct, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl; + ldpp_dout(this, 1) << "WARNING: failed to sync user stats before bucket delete: op_ret= " << op_ret << dendl; } op_ret = store->check_bucket_empty(s->bucket_info); @@ -3055,7 +3062,7 @@ void RGWDeleteBucket::execute() op_ret = rgw_unlink_bucket(store, s->bucket_info.owner, s->bucket.tenant, s->bucket.name, false); if (op_ret < 0) { - ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret + ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret << dendl; } } @@ -3105,7 +3112,7 @@ int RGWPutObj::verify_permission() auto op_ret = get_params(); if (op_ret < 0) { - ldout(s->cct, 20) << "get_params() returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "get_params() returned ret=" << op_ret << dendl; return op_ret; } @@ -3168,7 +3175,7 @@ int RGWPutObjProcessor_Multipart::prepare(RGWRados *store, string *oid_rand) part_num = s->info.args.get("partNumber"); if (part_num.empty()) { - ldout(s->cct, 10) << "part number is empty" << dendl; + ldpp_dout(s, 10) << "part number is empty" << dendl; return -EINVAL; } @@ -3176,7 +3183,7 @@ int RGWPutObjProcessor_Multipart::prepare(RGWRados *store, string *oid_rand) uint64_t num = (uint64_t)strict_strtol(part_num.c_str(), 10, &err); if (!err.empty()) { - ldout(s->cct, 10) << "bad part number: " << part_num << ": " << err << dendl; + ldpp_dout(s, 10) << "bad part number: " << part_num << ": " << err << dendl; return -EINVAL; } @@ -3372,8 +3379,8 @@ int RGWPutObj::get_data(const off_t fst, const off_t lst, bufferlist& bl) bool need_decompress; op_ret = rgw_compression_info_from_attrset(attrs, need_decompress, cs_info); if (op_ret < 0) { - lderr(s->cct) << "ERROR: failed to decode compression info, cannot decompress" << dendl; - return -EIO; + ldpp_dout(s, 0) << "ERROR: failed to decode compression info" << dendl; + return -EIO; } bool partial_content = true; @@ -3470,7 +3477,7 @@ void RGWPutObj::execute() op_ret = get_system_versioning_params(s, &olh_epoch, &version_id); if (op_ret < 0) { - ldout(s->cct, 20) << "get_system_versioning_params() returned ret=" + ldpp_dout(this, 20) 
<< "get_system_versioning_params() returned ret=" << op_ret << dendl; goto done; } @@ -3478,17 +3485,17 @@ void RGWPutObj::execute() if (supplied_md5_b64) { need_calc_md5 = true; - ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl; + ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl; op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1], supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64)); - ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl; + ldpp_dout(this, 15) << "ceph_armor ret=" << op_ret << dendl; if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) { op_ret = -ERR_INVALID_DIGEST; goto done; } buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5); - ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl; + ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl; } if (!chunked_upload) { /* with chunked upload we don't know how big is the upload. @@ -3496,12 +3503,12 @@ void RGWPutObj::execute() op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket, user_quota, bucket_quota, s->content_length); if (op_ret < 0) { - ldout(s->cct, 20) << "check_quota() returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "check_quota() returned ret=" << op_ret << dendl; goto done; } op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota); if (op_ret < 0) { - ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl; goto done; } } @@ -3530,7 +3537,7 @@ void RGWPutObj::execute() op_ret = processor->prepare(store, NULL); if (op_ret < 0) { - ldout(s->cct, 20) << "processor->prepare() returned ret=" << op_ret + ldpp_dout(this, 20) << "processor->prepare() returned ret=" << op_ret << dendl; goto done; } @@ -3543,7 +3550,7 @@ void RGWPutObj::execute() op_ret = store->get_obj_state(static_cast(s->obj_ctx), copy_source_bucket_info, obj, &astate, true, false); if (op_ret < 0) { - ldout(s->cct, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl; + ldpp_dout(this, 0) << "ERROR: get copy source obj state returned with error" << op_ret << dendl; goto done; } if (!astate->exists){ @@ -3568,7 +3575,7 @@ void RGWPutObj::execute() if (compression_type != "none") { plugin = get_compressor_plugin(s, compression_type); if (!plugin) { - ldout(s->cct, 1) << "Cannot load plugin for compression type " + ldpp_dout(this, 1) << "Cannot load plugin for compression type " << compression_type << dendl; } else { compressor.emplace(s->cct, plugin, filter); @@ -3595,7 +3602,7 @@ void RGWPutObj::execute() } if (len < 0) { op_ret = len; - ldout(s->cct, 20) << "get_data() returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "get_data() returned ret=" << op_ret << dendl; goto done; } @@ -3620,12 +3627,12 @@ void RGWPutObj::execute() op_ret = put_data_and_throttle(filter, data, ofs, need_to_wait); if (op_ret < 0) { if (!need_to_wait || op_ret != -EEXIST) { - ldout(s->cct, 20) << "processor->thottle_data() returned ret=" + ldpp_dout(this, 20) << "processor->thottle_data() returned ret=" << op_ret << dendl; goto done; } /* need_to_wait == true and op_ret == -EEXIST */ - ldout(s->cct, 5) << "NOTICE: processor->throttle_data() returned -EEXIST, need to restart write" << dendl; + ldpp_dout(this, 5) << "NOTICE: processor->throttle_data() returned -EEXIST, need to restart write" << dendl; /* restore original data */ 
data.swap(orig_data); @@ -3643,7 +3650,7 @@ void RGWPutObj::execute() op_ret = processor->prepare(store, &oid_rand); if (op_ret < 0) { - ldout(s->cct, 0) << "ERROR: processor->prepare() returned " + ldpp_dout(this, 0) << "ERROR: processor->prepare() returned " << op_ret << dendl; goto done; } @@ -3694,13 +3701,13 @@ void RGWPutObj::execute() op_ret = store->check_quota(s->bucket_owner.get_id(), s->bucket, user_quota, bucket_quota, s->obj_size); if (op_ret < 0) { - ldout(s->cct, 20) << "second check_quota() returned op_ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "second check_quota() returned op_ret=" << op_ret << dendl; goto done; } op_ret = store->check_bucket_shards(s->bucket_info, s->bucket, bucket_quota); if (op_ret < 0) { - ldout(s->cct, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "check_bucket_shards() returned ret=" << op_ret << dendl; goto done; } @@ -3714,7 +3721,7 @@ void RGWPutObj::execute() cs_info.blocks = move(compressor->get_compression_blocks()); encode(cs_info, tmp); attrs[RGW_ATTR_COMPRESSION] = tmp; - ldout(s->cct, 20) << "storing " << RGW_ATTR_COMPRESSION + ldpp_dout(this, 20) << "storing " << RGW_ATTR_COMPRESSION << " with type=" << cs_info.compression_type << ", orig_size=" << cs_info.orig_size << ", blocks=" << cs_info.blocks.size() << dendl; @@ -3735,11 +3742,11 @@ void RGWPutObj::execute() if (dlo_manifest) { op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs); if (op_ret < 0) { - ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl; + ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl; goto done; } complete_etag(hash, &etag); - ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl; + ldpp_dout(this, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl; } if (slo_info) { @@ -3749,7 +3756,7 @@ void RGWPutObj::execute() hash.Update((unsigned char *)slo_info->raw_data, slo_info->raw_data_len); complete_etag(hash, &etag); - ldout(s->cct, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl; + ldpp_dout(this, 10) << __func__ << ": calculated md5 for user manifest: " << etag << dendl; } if (supplied_etag && etag.compare(supplied_etag) != 0) { @@ -3794,7 +3801,7 @@ void RGWPutObj::execute() op_ret = torrent.complete(); if (0 != op_ret) { - ldout(s->cct, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl; + ldpp_dout(this, 0) << "ERROR: torrent.handle_data() returned " << op_ret << dendl; goto done; } } @@ -3874,17 +3881,17 @@ void RGWPostObj::execute() if (supplied_md5_b64) { char supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1]; - ldout(s->cct, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl; + ldpp_dout(this, 15) << "supplied_md5_b64=" << supplied_md5_b64 << dendl; op_ret = ceph_unarmor(supplied_md5_bin, &supplied_md5_bin[CEPH_CRYPTO_MD5_DIGESTSIZE + 1], supplied_md5_b64, supplied_md5_b64 + strlen(supplied_md5_b64)); - ldout(s->cct, 15) << "ceph_armor ret=" << op_ret << dendl; + ldpp_dout(this, 15) << "ceph_armor ret=" << op_ret << dendl; if (op_ret != CEPH_CRYPTO_MD5_DIGESTSIZE) { op_ret = -ERR_INVALID_DIGEST; return; } buf_to_hex((const unsigned char *)supplied_md5_bin, CEPH_CRYPTO_MD5_DIGESTSIZE, supplied_md5); - ldout(s->cct, 15) << "supplied_md5=" << supplied_md5 << dendl; + ldpp_dout(this, 15) << "supplied_md5=" << supplied_md5 << dendl; } RGWPutObjProcessor_Atomic processor(*static_cast<RGWObjectCtx *>(s->obj_ctx), @@ -3915,7 +3922,7 @@ void RGWPostObj::execute() if (compression_type !=
"none") { plugin = Compressor::create(s->cct, compression_type); if (!plugin) { - ldout(s->cct, 1) << "Cannot load plugin for compression type " + ldpp_dout(this, 1) << "Cannot load plugin for compression type " << compression_type << dendl; } else { compressor.emplace(s->cct, plugin, filter); @@ -4285,7 +4292,7 @@ void RGWPutMetadataObject::execute() if (dlo_manifest) { op_ret = encode_dlo_manifest_attr(dlo_manifest, attrs); if (op_ret < 0) { - ldout(s->cct, 0) << "bad user manifest: " << dlo_manifest << dendl; + ldpp_dout(this, 0) << "bad user manifest: " << dlo_manifest << dendl; return; } } @@ -4300,7 +4307,7 @@ int RGWDeleteObj::handle_slo_manifest(bufferlist& bl) try { decode(slo_info, bliter); } catch (buffer::error& err) { - ldout(s->cct, 0) << "ERROR: failed to decode slo manifest" << dendl; + ldpp_dout(this, 0) << "ERROR: failed to decode slo manifest" << dendl; return -EIO; } @@ -4363,7 +4370,7 @@ int RGWDeleteObj::verify_permission() if (s->bucket_info.mfa_enabled() && !s->object.instance.empty() && !s->mfa_verified) { - ldout(s->cct, 5) << "NOTICE: object delete request with a versioned object, mfa auth not provided" << dendl; + ldpp_dout(this, 5) << "NOTICE: object delete request with a versioned object, mfa auth not provided" << dendl; return -ERR_MFA_REQUIRED; } @@ -4406,7 +4413,7 @@ void RGWDeleteObj::execute() if (slo_attr != attrs.end()) { op_ret = handle_slo_manifest(slo_attr->second); if (op_ret < 0) { - ldout(s->cct, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "ERROR: failed to handle slo manifest ret=" << op_ret << dendl; } } else { op_ret = -ERR_NOT_SLO_MANIFEST; @@ -4891,7 +4898,7 @@ void RGWPutACLs::execute() op_ret = get_params(); if (op_ret < 0) { if (op_ret == -ERANGE) { - ldout(s->cct, 4) << "The size of request xml data is larger than the max limitation, data size = " + ldpp_dout(this, 4) << "The size of request xml data is larger than the max limitation, data size = " << s->length << dendl; op_ret = -ERR_MALFORMED_XML; s->err.message = "The XML you provided was larger than the maximum " + @@ -4901,7 +4908,7 @@ void RGWPutACLs::execute() return; } - ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl; + ldpp_dout(this, 15) << "read len=" << len << " data=" << (data ? 
data : "") << dendl; if (!s->canned_acl.empty() && len) { op_ret = -EINVAL; @@ -4939,10 +4946,8 @@ void RGWPutACLs::execute() int grants_num = req_grant_map.size(); if (grants_num > max_num) { - ldout(s->cct, 4) << "An acl can have up to " - << max_num - << " grants, request acl grants num: " - << grants_num << dendl; + ldpp_dout(this, 4) << "An acl can have up to " << max_num + << " grants, request acl grants num: " << grants_num << dendl; op_ret = -ERR_MALFORMED_ACL_ERROR; s->err.message = "The request is rejected, because the acl grants number you requested is larger than the maximum " + std::to_string(max_num) @@ -4959,13 +4964,13 @@ void RGWPutACLs::execute() } op_ret = forward_request_to_master(s, NULL, store, in_data, NULL); if (op_ret < 0) { - ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } } if (s->cct->_conf->subsys.should_gather()) { - ldout(s->cct, 15) << "Old AccessControlPolicy"; + ldpp_dout(this, 15) << "Old AccessControlPolicy"; policy->to_xml(*_dout); *_dout << dendl; } @@ -4975,7 +4980,7 @@ void RGWPutACLs::execute() return; if (s->cct->_conf->subsys.should_gather()) { - ldout(s->cct, 15) << "New AccessControlPolicy:"; + ldpp_dout(this, 15) << "New AccessControlPolicy:"; new_policy.to_xml(*_dout); *_dout << dendl; } @@ -5022,7 +5027,7 @@ void RGWPutLC::execute() if (content_md5 == nullptr) { op_ret = -ERR_INVALID_REQUEST; s->err.message = "Missing required header for this request: Content-MD5"; - ldout(s->cct, 5) << s->err.message << dendl; + ldpp_dout(this, 5) << s->err.message << dendl; return; } @@ -5032,7 +5037,7 @@ void RGWPutLC::execute() } catch (...) { s->err.message = "Request header Content-MD5 contains character " "that is not base64 encoded."; - ldout(s->cct, 5) << s->err.message << dendl; + ldpp_dout(this, 5) << s->err.message << dendl; op_ret = -ERR_BAD_DIGEST; return; } @@ -5046,7 +5051,7 @@ void RGWPutLC::execute() if (op_ret < 0) return; - ldout(s->cct, 15) << "read len=" << len << " data=" << (data ? data : "") << dendl; + ldpp_dout(this, 15) << "read len=" << len << " data=" << (data ? 
data : "") << dendl; MD5 data_hash; unsigned char data_hash_res[CEPH_CRYPTO_MD5_DIGESTSIZE]; @@ -5056,7 +5061,7 @@ void RGWPutLC::execute() if (memcmp(data_hash_res, content_md5_bin.c_str(), CEPH_CRYPTO_MD5_DIGESTSIZE) != 0) { op_ret = -ERR_BAD_DIGEST; s->err.message = "The Content-MD5 you specified did not match what we received."; - ldout(s->cct, 5) << s->err.message + ldpp_dout(this, 5) << s->err.message << " Specified content md5: " << content_md5 << ", calculated content md5: " << data_hash_res << dendl; @@ -5078,7 +5083,7 @@ void RGWPutLC::execute() return; if (s->cct->_conf->subsys.should_gather()) { - ldout(s->cct, 15) << "New LifecycleConfiguration:"; + ldpp_dout(this, 15) << "New LifecycleConfiguration:"; new_config.to_xml(*_dout); *_dout << dendl; } @@ -5103,17 +5108,20 @@ void RGWPutLC::execute() do { op_ret = l.lock_exclusive(ctx, oid); if (op_ret == -EBUSY) { - dout(0) << "RGWLC::RGWPutLC() failed to acquire lock on " << oid << ", sleep 5, try again" << dendl; - sleep(5); + ldpp_dout(this, 0) << "RGWLC::RGWPutLC() failed to acquire lock on " + << oid << ", sleep 5, try again" << dendl; + sleep(5); // XXX: return retryable error continue; } if (op_ret < 0) { - dout(0) << "RGWLC::RGWPutLC() failed to acquire lock on " << oid << ", ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "RGWLC::RGWPutLC() failed to acquire lock on " + << oid << ", ret=" << op_ret << dendl; break; } op_ret = cls_rgw_lc_set_entry(*ctx, oid, entry); if (op_ret < 0) { - dout(0) << "RGWLC::RGWPutLC() failed to set entry on " << oid << ", ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "RGWLC::RGWPutLC() failed to set entry on " + << oid << ", ret=" << op_ret << dendl; } break; }while(1); @@ -5128,8 +5136,8 @@ void RGWDeleteLC::execute() op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker); if (op_ret < 0) { - ldout(s->cct, 0) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket=" << s->bucket.name - << " returned err=" << op_ret << dendl; + ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to set attrs on bucket=" + << s->bucket.name << " returned err=" << op_ret << dendl; return; } string shard_id = s->bucket.tenant + ':' + s->bucket.name + ':' + s->bucket.bucket_id; @@ -5144,17 +5152,20 @@ void RGWDeleteLC::execute() do { op_ret = l.lock_exclusive(ctx, oid); if (op_ret == -EBUSY) { - dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock on " << oid << ", sleep 5, try again" << dendl; - sleep(5); + ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to acquire lock on " + << oid << ", sleep 5, try again" << dendl; + sleep(5); // XXX: return retryable error continue; } if (op_ret < 0) { - dout(0) << "RGWLC::RGWDeleteLC() failed to acquire lock on " << oid << ", ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to acquire lock on " + << oid << ", ret=" << op_ret << dendl; break; } op_ret = cls_rgw_lc_rm_entry(*ctx, oid, entry); if (op_ret < 0) { - dout(0) << "RGWLC::RGWDeleteLC() failed to rm entry on " << oid << ", ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "RGWLC::RGWDeleteLC() failed to rm entry on " + << oid << ", ret=" << op_ret << dendl; } break; }while(1); @@ -5174,7 +5185,7 @@ void RGWGetCORS::execute() return ; if (!cors_exist) { - dout(2) << "No CORS configuration set yet for this bucket" << dendl; + ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl; op_ret = -ENOENT; return; } @@ -5196,7 +5207,7 @@ void RGWPutCORS::execute() if (!store->is_meta_master()) { op_ret = 
forward_request_to_master(s, NULL, store, in_data, nullptr); if (op_ret < 0) { - ldout(s->cct, 20) << __func__ << " forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } } @@ -5222,7 +5233,7 @@ void RGWDeleteCORS::execute() return op_ret; if (!cors_exist) { - dout(2) << "No CORS configuration set yet for this bucket" << dendl; + ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl; op_ret = -ENOENT; return op_ret; } @@ -5232,7 +5243,7 @@ void RGWDeleteCORS::execute() op_ret = rgw_bucket_set_attrs(store, s->bucket_info, attrs, &s->bucket_info.objv_tracker); if (op_ret < 0) { - ldout(s->cct, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket.name + ldpp_dout(this, 0) << "RGWLC::RGWDeleteCORS() failed to set attrs on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl; } return op_ret; @@ -5246,7 +5257,7 @@ void RGWOptionsCORS::get_response_params(string& hdrs, string& exp_hdrs, unsigne int RGWOptionsCORS::validate_cors_request(RGWCORSConfiguration *cc) { rule = cc->host_name_rule(origin); if (!rule) { - dout(10) << "There is no cors rule present for " << origin << dendl; + ldpp_dout(this, 10) << "There is no cors rule present for " << origin << dendl; return -ENOENT; } @@ -5269,22 +5280,18 @@ void RGWOptionsCORS::execute() origin = s->info.env->get("HTTP_ORIGIN"); if (!origin) { - dout(0) << - "Preflight request without mandatory Origin header" - << dendl; + ldpp_dout(this, 0) << "Missing mandatory Origin header" << dendl; op_ret = -EINVAL; return; } req_meth = s->info.env->get("HTTP_ACCESS_CONTROL_REQUEST_METHOD"); if (!req_meth) { - dout(0) << - "Preflight request without mandatory Access-control-request-method header" - << dendl; + ldpp_dout(this, 0) << "Missing mandatory Access-control-request-method header" << dendl; op_ret = -EINVAL; return; } if (!cors_exist) { - dout(2) << "No CORS configuration set yet for this bucket" << dendl; + ldpp_dout(this, 2) << "No CORS configuration set yet for this bucket" << dendl; op_ret = -ENOENT; return; } @@ -5333,7 +5340,7 @@ void RGWSetRequestPayment::execute() op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs); if (op_ret < 0) { - ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name + ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl; return; } @@ -5448,7 +5455,7 @@ static int get_multipart_info(RGWRados *store, struct req_state *s, try { decode(*policy, bli); } catch (buffer::error& err) { - ldout(s->cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl; + ldpp_dout(s, 0) << "ERROR: could not decode policy" << dendl; return -EIO; } break; @@ -5578,7 +5585,7 @@ void RGWCompleteMultipart::execute() op_ret = serializer.try_lock(raw_obj.oid, dur); if (op_ret < 0) { - dout(0) << "RGWCompleteMultipart::execute() failed to acquire lock " << dendl; + ldpp_dout(this, 0) << "failed to acquire lock" << dendl; op_ret = -ERR_INTERNAL_ERROR; s->err.message = "This multipart completion is already in progress"; return; @@ -5587,7 +5594,7 @@ void RGWCompleteMultipart::execute() op_ret = get_obj_attrs(store, s, meta_obj, attrs); if (op_ret < 0) { - ldout(s->cct, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj + ldpp_dout(this, 0) << "ERROR: failed to get obj attrs, obj=" << meta_obj << " ret=" << op_ret << dendl; return; } @@ -5603,7 +5610,7 
@@ void RGWCompleteMultipart::execute() total_parts += obj_parts.size(); if (!truncated && total_parts != (int)parts->parts.size()) { - ldout(s->cct, 0) << "NOTICE: total parts mismatch: have: " << total_parts + ldpp_dout(this, 0) << "NOTICE: total parts mismatch: have: " << total_parts << " expected: " << parts->parts.size() << dendl; op_ret = -ERR_INVALID_PART; return; @@ -5619,7 +5626,7 @@ void RGWCompleteMultipart::execute() char petag[CEPH_CRYPTO_MD5_DIGESTSIZE]; if (iter->first != (int)obj_iter->first) { - ldout(s->cct, 0) << "NOTICE: parts num mismatch: next requested: " + ldpp_dout(this, 0) << "NOTICE: parts num mismatch: next requested: " << iter->first << " next uploaded: " << obj_iter->first << dendl; op_ret = -ERR_INVALID_PART; @@ -5627,7 +5634,7 @@ void RGWCompleteMultipart::execute() } string part_etag = rgw_string_unquote(iter->second); if (part_etag.compare(obj_iter->second.etag) != 0) { - ldout(s->cct, 0) << "NOTICE: etag mismatch: part: " << iter->first + ldpp_dout(this, 0) << "NOTICE: etag mismatch: part: " << iter->first << " etag: " << iter->second << dendl; op_ret = -ERR_INVALID_PART; return; @@ -5645,7 +5652,7 @@ void RGWCompleteMultipart::execute() src_obj.init_ns(s->bucket, oid, mp_ns); if (obj_part.manifest.empty()) { - ldout(s->cct, 0) << "ERROR: empty manifest for object part: obj=" + ldpp_dout(this, 0) << "ERROR: empty manifest for object part: obj=" << src_obj << dendl; op_ret = -ERR_INVALID_PART; return; @@ -5657,7 +5664,7 @@ void RGWCompleteMultipart::execute() if ((obj_iter != obj_parts.begin()) && ((part_compressed != compressed) || (cs_info.compression_type != obj_part.cs_info.compression_type))) { - ldout(s->cct, 0) << "ERROR: compression type was changed during multipart upload (" + ldpp_dout(this, 0) << "ERROR: compression type was changed during multipart upload (" << cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl; op_ret = -ERR_INVALID_PART; return; @@ -5698,7 +5705,7 @@ void RGWCompleteMultipart::execute() snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2, "-%lld", (long long)parts->parts.size()); etag = final_etag_str; - ldout(s->cct, 10) << "calculated etag: " << final_etag_str << dendl; + ldpp_dout(this, 10) << "calculated etag: " << final_etag_str << dendl; etag_bl.append(final_etag_str, strlen(final_etag_str)); @@ -5747,8 +5754,7 @@ void RGWCompleteMultipart::execute() /* serializer's exclusive lock is released */ serializer.clear_locked(); } else { - ldout(store->ctx(), 0) << "WARNING: failed to remove object " - << meta_obj << dendl; + ldpp_dout(this, 0) << "WARNING: failed to remove object " << meta_obj << dendl; } } @@ -5773,8 +5779,7 @@ void RGWCompleteMultipart::complete() if (unlikely(serializer.locked)) { int r = serializer.unlock(); if (r < 0) { - ldout(store->ctx(), 0) << "WARNING: failed to unlock " - << serializer.oid << dendl; + ldpp_dout(this, 0) << "WARNING: failed to unlock " << serializer.oid << dendl; } } send_response(); @@ -5991,7 +5996,7 @@ void RGWDeleteMultiObj::execute() } } if (has_versioned && !s->mfa_verified) { - ldout(s->cct, 5) << "NOTICE: multi-object delete request with a versioned object, mfa auth not provided" << dendl; + ldpp_dout(this, 5) << "NOTICE: multi-object delete request with a versioned object, mfa auth not provided" << dendl; op_ret = -ERR_MFA_REQUIRED; goto error; } @@ -6119,8 +6124,7 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path) ret = rgw_unlink_bucket(store, binfo.owner, 
binfo.bucket.tenant, binfo.bucket.name, false); if (ret < 0) { - ldout(s->cct, 0) << "WARNING: failed to unlink bucket: ret=" << ret - << dendl; + ldpp_dout(s, 0) << "WARNING: failed to unlink bucket: ret=" << ret << dendl; } } if (ret < 0) { @@ -6148,11 +6152,10 @@ bool RGWBulkDelete::Deleter::delete_single(const acct_path_t& path) binfo_fail: if (-ENOENT == ret) { - ldout(store->ctx(), 20) << "cannot find bucket = " << path.bucket_name << dendl; + ldpp_dout(s, 20) << "cannot find bucket = " << path.bucket_name << dendl; num_unfound++; } else { - ldout(store->ctx(), 20) << "cannot get bucket info, ret = " << ret - << dendl; + ldpp_dout(s, 20) << "cannot get bucket info, ret = " << ret << dendl; fail_desc_t failed_item = { .err = ret, @@ -6163,7 +6166,7 @@ binfo_fail: return false; auth_fail: - ldout(store->ctx(), 20) << "wrong auth for " << path << dendl; + ldpp_dout(s, 20) << "wrong auth for " << path << dendl; { fail_desc_t failed_item = { .err = ret, @@ -6175,7 +6178,7 @@ auth_fail: delop_fail: if (-ENOENT == ret) { - ldout(store->ctx(), 20) << "cannot find entry " << path << dendl; + ldpp_dout(s, 20) << "cannot find entry " << path << dendl; num_unfound++; } else { fail_desc_t failed_item = { @@ -6189,9 +6192,9 @@ delop_fail: bool RGWBulkDelete::Deleter::delete_chunk(const std::list<acct_path_t>& paths) { - ldout(store->ctx(), 20) << "in delete_chunk" << dendl; + ldpp_dout(s, 20) << "in delete_chunk" << dendl; for (auto path : paths) { - ldout(store->ctx(), 20) << "bulk deleting path: " << path << dendl; + ldpp_dout(s, 20) << "bulk deleting path: " << path << dendl; delete_single(path); } @@ -6241,10 +6244,9 @@ int RGWBulkUploadOp::verify_permission() } if (s->user->user_id.tenant != s->bucket_tenant) { - ldout(s->cct, 10) << "user cannot create a bucket in a different tenant" - << " (user_id.tenant=" << s->user->user_id.tenant - << " requested=" << s->bucket_tenant << ")" - << dendl; + ldpp_dout(this, 10) << "user cannot create a bucket in a different tenant" + << " (user_id.tenant=" << s->user->user_id.tenant + << " requested=" << s->bucket_tenant << ")" << dendl; return -EACCES; } @@ -6346,7 +6348,7 @@ static void forward_req_info(CephContext *cct, req_info& info, const std::string int RGWBulkUploadOp::handle_dir(const boost::string_ref path) { - ldout(s->cct, 20) << "bulk upload: got directory=" << path << dendl; + ldpp_dout(this, 20) << "got directory=" << path << dendl; op_ret = handle_dir_verify_permission(); if (op_ret < 0) { @@ -6403,10 +6405,8 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path) JSONDecoder::decode_json("object_ver", objv, &jp); JSONDecoder::decode_json("bucket_info", master_info, &jp); - ldout(s->cct, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" - << objv.ver << dendl; - ldout(s->cct, 20) << "got creation_time="<< master_info.creation_time - << dendl; + ldpp_dout(this, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl; + ldpp_dout(this, 20) << "got creation_time="<< master_info.creation_time << dendl; pmaster_bucket= &master_info.bucket; creation_time = master_info.creation_time; @@ -6431,7 +6431,7 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path) nullptr); if (selected_placement_rule != binfo.placement_rule) { op_ret = -EEXIST; - ldout(s->cct, 20) << "bulk upload: non-coherent placement rule" << dendl; + ldpp_dout(this, 20) << "non-coherent placement rule" << dendl; return op_ret; } } @@ -6462,8 +6462,8 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path) pmaster_bucket, pmaster_num_shards,
true); /* continue if EEXIST and create_bucket will fail below. this way we can * recover from a partial create by retrying it. */ - ldout(s->cct, 20) << "rgw_create_bucket returned ret=" << op_ret - << ", bucket=" << bucket << dendl; + ldpp_dout(this, 20) << "rgw_create_bucket returned ret=" << op_ret + << ", bucket=" << bucket << dendl; if (op_ret && op_ret != -EEXIST) { return op_ret; @@ -6479,7 +6479,7 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path) */ if (out_info.owner.compare(s->user->user_id) != 0) { op_ret = -EEXIST; - ldout(s->cct, 20) << "bulk upload: conflicting bucket name" << dendl; + ldpp_dout(this, 20) << "conflicting bucket name" << dendl; return op_ret; } bucket = out_info.bucket; @@ -6492,12 +6492,10 @@ int RGWBulkUploadOp::handle_dir(const boost::string_ref path) op_ret = rgw_unlink_bucket(store, s->user->user_id, bucket.tenant, bucket.name); if (op_ret < 0) { - ldout(s->cct, 0) << "bulk upload: WARNING: failed to unlink bucket: ret=" - << op_ret << dendl; + ldpp_dout(this, 0) << "WARNING: failed to unlink bucket: ret=" << op_ret << dendl; } } else if (op_ret == -EEXIST || (op_ret == 0 && existed)) { - ldout(s->cct, 20) << "bulk upload: containers already exists" - << dendl; + ldpp_dout(this, 20) << "containers already exists" << dendl; op_ret = -ERR_BUCKET_EXISTS; } @@ -6513,8 +6511,7 @@ bool RGWBulkUploadOp::handle_file_verify_permission(RGWBucketInfo& binfo, RGWAccessControlPolicy bacl(store->ctx()); op_ret = read_bucket_policy(store, s, binfo, battrs, &bacl, binfo.bucket); if (op_ret < 0) { - ldout(s->cct, 20) << "bulk upload: cannot read_policy() for bucket" - << dendl; + ldpp_dout(this, 20) << "cannot read_policy() for bucket" << dendl; return false; } @@ -6540,8 +6537,7 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path, AlignedStreamGetter& body) { - ldout(s->cct, 20) << "bulk upload: got file=" << path << ", size=" << size - << dendl; + ldpp_dout(this, 20) << "got file=" << path << ", size=" << size << dendl; RGWPutObjDataProcessor *filter = nullptr; boost::optional<RGWPutObj_Compress> compressor; @@ -6562,8 +6558,7 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path, op_ret = store->get_bucket_info(obj_ctx, s->user->user_id.tenant, bucket_name, binfo, nullptr, &battrs); if (op_ret == -ENOENT) { - ldout(s->cct, 20) << "bulk upload: non existent directory=" << bucket_name - << dendl; + ldpp_dout(this, 20) << "non existent directory=" << bucket_name << dendl; } else if (op_ret < 0) { return op_ret; } @@ -6571,7 +6566,7 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path, if (! handle_file_verify_permission(binfo, rgw_obj(binfo.bucket, object), battrs, bowner)) { - ldout(s->cct, 20) << "bulk upload: object creation unauthorized" << dendl; + ldpp_dout(this, 20) << "object creation unauthorized" << dendl; op_ret = -EACCES; return op_ret; } @@ -6601,8 +6596,7 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path, op_ret = processor.prepare(store, nullptr); if (op_ret < 0) { - ldout(s->cct, 20) << "bulk upload: cannot prepare processor due to ret=" - << op_ret << dendl; + ldpp_dout(this, 20) << "cannot prepare processor due to ret=" << op_ret << dendl; return op_ret; } @@ -6612,8 +6606,8 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path, if (compression_type != "none") { plugin = Compressor::create(s->cct, compression_type); if (!
plugin) { - ldout(s->cct, 1) << "Cannot load plugin for rgw_compression_type " - << compression_type << dendl; + ldpp_dout(this, 1) << "Cannot load plugin for rgw_compression_type " + << compression_type << dendl; } else { compressor.emplace(s->cct, plugin, filter); filter = &*compressor; @@ -6628,7 +6622,7 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path, ceph::bufferlist data; len = body.get_at_most(s->cct->_conf->rgw_max_chunk_size, data); - ldout(s->cct, 20) << "bulk upload: body=" << data.c_str() << dendl; + ldpp_dout(this, 20) << "body=" << data.c_str() << dendl; if (len < 0) { op_ret = len; return op_ret; @@ -6636,8 +6630,7 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path, hash.Update((const unsigned char *)data.c_str(), data.length()); op_ret = put_data_and_throttle(filter, data, ofs, false); if (op_ret < 0) { - ldout(s->cct, 20) << "processor->thottle_data() returned ret=" - << op_ret << dendl; + ldpp_dout(this, 20) << "processor->thottle_data() returned ret=" << op_ret << dendl; return op_ret; } @@ -6647,16 +6640,14 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path, } while (len > 0); if (ofs != size) { - ldout(s->cct, 10) << "bulk upload: real file size different from declared" - << dendl; + ldpp_dout(this, 10) << "real file size different from declared" << dendl; op_ret = -EINVAL; } op_ret = store->check_quota(bowner.get_id(), binfo.bucket, user_quota, bucket_quota, size); if (op_ret < 0) { - ldout(s->cct, 20) << "bulk upload: quota exceeded for path=" << path - << dendl; + ldpp_dout(this, 20) << "quota exceeded for path=" << path << dendl; return op_ret; } @@ -6699,8 +6690,7 @@ int RGWBulkUploadOp::handle_file(const boost::string_ref path, op_ret = processor.complete(size, etag, nullptr, ceph::real_time(), attrs, ceph::real_time() /* delete_at */); if (op_ret < 0) { - ldout(s->cct, 20) << "bulk upload: processor::complete returned op_ret=" - << op_ret << dendl; + ldpp_dout(this, 20) << "processor::complete returned op_ret=" << op_ret << dendl; } return op_ret; @@ -6710,7 +6700,7 @@ void RGWBulkUploadOp::execute() { ceph::bufferlist buffer(64 * 1024); - ldout(s->cct, 20) << "bulk upload: start" << dendl; + ldpp_dout(this, 20) << "start" << dendl; /* Create an instance of stream-abstracting class. Having this indirection * allows for easy introduction of decompressors like gzip and bzip2. */ @@ -6728,7 +6718,7 @@ void RGWBulkUploadOp::execute() do { op_ret = stream->get_exactly(rgw::tar::BLOCK_SIZE, buffer); if (op_ret < 0) { - ldout(s->cct, 2) << "bulk upload: cannot read header" << dendl; + ldpp_dout(this, 2) << "cannot read header" << dendl; return; } @@ -6746,7 +6736,7 @@ void RGWBulkUploadOp::execute() * will be ignored but won't cease the whole upload. */ switch (header->get_filetype()) { case rgw::tar::FileType::NORMAL_FILE: { - ldout(s->cct, 2) << "bulk upload: handling regular file" << dendl; + ldpp_dout(this, 2) << "handling regular file" << dendl; boost::string_ref filename = bucket_path.empty() ? header->get_filename() : \ file_prefix + header->get_filename().to_string(); @@ -6764,7 +6754,7 @@ void RGWBulkUploadOp::execute() break; } case rgw::tar::FileType::DIRECTORY: { - ldout(s->cct, 2) << "bulk upload: handling regular directory" << dendl; + ldpp_dout(this, 2) << "handling regular directory" << dendl; boost::string_ref dirname = bucket_path.empty() ? header->get_filename() : bucket_path; op_ret = handle_dir(dirname); @@ -6784,12 +6774,11 @@ void RGWBulkUploadOp::execute() * terminates whole upload immediately. 
*/ if (boost::algorithm::contains(std::initializer_list<int>{ op_ret }, terminal_errors)) { - ldout(s->cct, 2) << "bulk upload: terminating due to ret=" << op_ret - << dendl; + ldpp_dout(this, 2) << "terminating due to ret=" << op_ret << dendl; break; } } else { - ldout(s->cct, 2) << "bulk upload: an empty block" << dendl; + ldpp_dout(this, 2) << "an empty block" << dendl; op_ret = 0; } @@ -6912,7 +6901,7 @@ void RGWConfigBucketMetaSearch::execute() { op_ret = get_params(); if (op_ret < 0) { - ldout(s->cct, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "NOTICE: get_params() returned ret=" << op_ret << dendl; return; } @@ -6920,7 +6909,8 @@ void RGWConfigBucketMetaSearch::execute() op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs); if (op_ret < 0) { - ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl; + ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name + << " returned err=" << op_ret << dendl; return; } } @@ -6959,7 +6949,8 @@ void RGWDelBucketMetaSearch::execute() op_ret = store->put_bucket_instance_info(s->bucket_info, false, real_time(), &s->bucket_attrs); if (op_ret < 0) { - ldout(s->cct, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name << " returned err=" << op_ret << dendl; + ldpp_dout(this, 0) << "NOTICE: put_bucket_info on bucket=" << s->bucket.name + << " returned err=" << op_ret << dendl; return; } } @@ -6983,7 +6974,7 @@ int RGWHandler::do_init_permissions() { int ret = rgw_build_bucket_policies(store, s); if (ret < 0) { - ldout(s->cct, 10) << "init_permissions on " << s->bucket + ldpp_dout(s, 10) << "init_permissions on " << s->bucket << " failed, ret=" << ret << dendl; return ret==-ENODATA ?
-EACCES : ret; } @@ -7001,7 +6992,7 @@ int RGWHandler::do_read_permissions(RGWOp *op, bool only_bucket) int ret = rgw_build_object_policies(store, s, op->prefetch_data()); if (ret < 0) { - ldout(s->cct, 10) << "read_permissions on " << s->bucket << ":" + ldpp_dout(op, 10) << "read_permissions on " << s->bucket << ":" << s->object << " only_bucket=" << only_bucket << " ret=" << ret << dendl; if (ret == -ENODATA) @@ -7066,7 +7057,7 @@ void RGWPutBucketPolicy::execute() if (!store->is_meta_master()) { op_ret = forward_request_to_master(s, NULL, store, in_data, nullptr); if (op_ret < 0) { - ldout(s->cct, 20) << "forward_request_to_master returned ret=" << op_ret << dendl; + ldpp_dout(this, 20) << "forward_request_to_master returned ret=" << op_ret << dendl; return; } } @@ -7082,7 +7073,7 @@ void RGWPutBucketPolicy::execute() return op_ret; }); } catch (rgw::IAM::PolicyParseException& e) { - ldout(s->cct, 20) << "failed to parse policy: " << e.what() << dendl; + ldpp_dout(this, 20) << "failed to parse policy: " << e.what() << dendl; op_ret = -EINVAL; } } @@ -7111,8 +7102,8 @@ void RGWGetBucketPolicy::execute() auto attrs = s->bucket_attrs; map<string, bufferlist>::iterator aiter = attrs.find(RGW_ATTR_IAM_POLICY); if (aiter == attrs.end()) { - ldout(s->cct, 0) << __func__ << " can't find bucket IAM POLICY attr" - << " bucket_name = " << s->bucket_name << dendl; + ldpp_dout(this, 0) << "can't find bucket IAM POLICY attr bucket_name = " + << s->bucket_name << dendl; op_ret = -ERR_NO_SUCH_BUCKET_POLICY; s->err.message = "The bucket policy does not exist"; return; @@ -7120,7 +7111,8 @@ void RGWGetBucketPolicy::execute() policy = attrs[RGW_ATTR_IAM_POLICY]; if (policy.length() == 0) { - ldout(s->cct, 10) << "The bucket policy does not exist, bucket: " << s->bucket_name << dendl; + ldpp_dout(this, 10) << "The bucket policy does not exist, bucket: " + << s->bucket_name << dendl; op_ret = -ERR_NO_SUCH_BUCKET_POLICY; s->err.message = "The bucket policy does not exist"; return;
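
Background on the conversion: ldout(s->cct, level) logs through the bare CephContext, so lines from concurrent requests are indistinguishable, while ldpp_dout(pp, level) routes through a DoutPrefixProvider -- in the hunks above either the op ("this") or the request state ("s") -- whose gen_prefix() stamps each message with its request context. The sketch below is a self-contained toy of that mechanism; the names imitate the real interfaces in src/common/dout.h, but the bodies here are simplified stand-ins, not Ceph code:

  // toy_ldpp.cc -- illustrative only; build with: g++ -std=c++11 toy_ldpp.cc
  #include <cstdint>
  #include <iostream>
  #include <sstream>
  #include <string>

  static int g_log_level = 20;  // stand-in for the configured debug level

  // Anything that can print a per-line prefix (simplified DoutPrefixProvider).
  struct DoutPrefixProvider {
    virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
    virtual ~DoutPrefixProvider() = default;
  };

  // ldpp_dout(pp, lvl) opens a level-gated block and streams the prefix
  // first; dendl closes it.  The real macros feed the Ceph log subsystem
  // instead of std::cerr.
  #define ldpp_dout(pp, lvl) \
    if ((lvl) <= g_log_level) { \
      std::ostringstream oss_; \
      (pp)->gen_prefix(oss_)
  #define dendl \
      ""; std::cerr << oss_.str() << std::endl; \
    }

  // A request-scoped provider, loosely analogous to req_state/RGWOp.
  struct ReqPrefix : DoutPrefixProvider {
    uint64_t id = 0;
    std::string op_name;
    std::ostream& gen_prefix(std::ostream& out) const override {
      return out << "req " << id << " " << op_name << ": ";
    }
  };

  int main() {
    ReqPrefix s;
    s.id = 42;
    s.op_name = "get_obj";
    // Prints "req 42 get_obj: NOTICE: bucket foo is suspended"; a plain
    // ldout(cct, 0) would emit only the message, with no request context.
    ldpp_dout(&s, 0) << "NOTICE: bucket " << "foo" << " is suspended" << dendl;
    return 0;
  }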